input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import csv
import re
from collections import defaultdict
from copy import deepcopy
from enum import Enum
import vcf
from rck.core.io import EXTERNAL_NA_ID, SVTYPE, write_adjacencies_to_destination, COPY_NUMBER, parse_segment_extra
from rck.core.structures import Strand, Position, Adjacency, AdjacencyType, Phasing, Segment
# Keys used in adjacency ``extra`` dictionaries and in parser ``setup`` dicts.
VCF_FILTER = "vcf_filter"  # FILTER string of the primary VCF record
VCF_FILTER2 = "vcf_filter2"  # FILTER string of the mate (BND) record
STRIP_CHR = "strip_CHR"  # setup flag: drop a leading "chr" from chromosome names
APPEND_ID = "append_id"  # setup flag: append ID_SUFFIX to external adjacency ids
EXTRA_FIELDS = "extra_fields"
ID_SUFFIX = "id_suffix"  # setup key: suffix appended to external adjacency ids
SVTYPE2 = "svtype2"
EXTERNAL_NA_ID2 = "external_na_id2"  # id of the mate (BND) VCF record
SVLEN = "svlen"
SUPPORT_READ_NAMES = "support_read_names"
def get_most_supported_strands(strands_list):
    """Return the strand pair with the highest read support.

    Each entry is formatted as ``"<strands>:<support>"``. On equal support
    values the entry seen last wins (dict-overwrite semantics, as before).
    """
    strands_by_support = {}
    for item in strands_list:
        strand_pair, support_str = item.split(":")
        strands_by_support[int(support_str)] = strand_pair
    return strands_by_support[max(strands_by_support)]
def strip_chr(chr_string):
    """Drop a leading ``"chr"`` prefix from a chromosome name, if present."""
    return chr_string[3:] if chr_string.startswith("chr") else chr_string
def get_strand_string_from_alt_breakend_string(alt_breakend_string):
    """Map VCF breakend bracket notation to a strand symbol.

    A ``]`` at either end of the ALT string means "+"; otherwise "-".
    """
    uses_right_bracket = (alt_breakend_string.startswith("]")
                          or alt_breakend_string.endswith("]"))
    return "+" if uses_right_bracket else "-"
def get_strand_string_from_alt_breakend(alt_breakend):
    """Stringify a breakend ALT object and derive its strand symbol."""
    return get_strand_string_from_alt_breakend_string(str(alt_breakend))
def update_dict_with_vcf_info(target, vcf_record):
    """Copy ``(key, value)`` pairs from *vcf_record* into *target* in place,
    lower-casing every key, and return the mutated *target*."""
    target.update({str(key).lower(): value for key, value in vcf_record})
    return target
def get_inv_chrs_from_vcf_record(vcf_record):
    """All four inversion breakend chromosomes equal the record's CHROM."""
    chrom = str(vcf_record.CHROM)
    return chrom, chrom, chrom, chrom
def get_inv_na1_chrs_from_vcf_record(vcf_record):
    """Both ends of the first inversion adjacency lie on the record's CHROM."""
    chrom = str(vcf_record.CHROM)
    return chrom, chrom
def get_inv_na2_chrs_from_vcf_record(vcf_record):
    """The second inversion adjacency shares the first one's chromosomes."""
    return get_inv_na1_chrs_from_vcf_record(vcf_record)
def get_inv_na1_coords_from_vcf_record(vcf_record):
    """Coordinates of the first inversion adjacency: (POS - 1, INFO END)."""
    start = int(vcf_record.POS) - 1
    end = int(vcf_record.INFO["END"])
    return start, end
def get_inv_na2_coords_from_vcf_record(vcf_record):
    """Second inversion adjacency: one base past the first adjacency's ends."""
    first_start, first_end = get_inv_na1_coords_from_vcf_record(vcf_record=vcf_record)
    return first_start + 1, first_end + 1
def get_inv_na1_strands_from_vcf_record(vcf_record):
    """First inversion adjacency joins two forward-strand extremities."""
    return Strand.FORWARD, Strand.FORWARD
def get_inv_na2_strands_from_vcf_record(vcf_record):
    """Second inversion adjacency joins two reverse-strand extremities."""
    return Strand.REVERSE, Strand.REVERSE
def get_del_na_chrs_from_vcf_record(vcf_record):
    """A deletion adjacency stays on the record's own chromosome."""
    return get_inv_na1_chrs_from_vcf_record(vcf_record)
def get_del_na_coords_from_vcf_record(vcf_record):
    """Deletion adjacency coordinates: (POS - 1, INFO END + 1)."""
    left = int(vcf_record.POS) - 1
    right = int(vcf_record.INFO["END"]) + 1
    return left, right
def get_del_na_strands_from_vcf_record(vcf_record):
    """Deletion adjacency orientation: forward then reverse."""
    return Strand.FORWARD, Strand.REVERSE
def get_dup_na_chrs_from_vcf_record(vcf_record):
    """A duplication adjacency stays on the record's own chromosome."""
    return get_inv_na1_chrs_from_vcf_record(vcf_record)
def get_dup_na_coords_from_vcf_record(vcf_record):
    """Duplication adjacency coordinates: (POS, INFO END)."""
    left = int(vcf_record.POS)
    right = int(vcf_record.INFO["END"])
    return left, right
def get_dup_na_strands_from_vcf_record(vcf_record):
    """Duplication adjacency orientation: reverse then forward."""
    return Strand.REVERSE, Strand.FORWARD
def get_strand_from_alt_breakend(vcf_recrod):
    """Derive a Strand from the bracket orientation of the first ALT breakend.

    The parameter name ``vcf_recrod`` (sic) is kept: callers pass it by keyword.
    """
    alt_text = str(vcf_recrod.ALT[0])
    bracketed = alt_text.startswith("]") or alt_text.endswith("]")
    return Strand.FORWARD if bracketed else Strand.REVERSE
def get_vcf_records_from_file(vcf_file_name):
    """Open *vcf_file_name* in text mode and return all of its VCF records."""
    with open(vcf_file_name, "rt") as handle:
        return get_vcf_records_from_source(source=handle)
def get_vcf_records_from_source(source):
    """Materialize all records from an open VCF stream as a list.

    The RNAMES INFO declaration, when present, is patched to an unbounded
    cardinality ('.') so that multi-valued read-name lists parse correctly.
    """
    reader = vcf.Reader(source)
    if "RNAMES" in reader.infos:
        reader.infos["RNAMES"] = reader.infos["RNAMES"]._replace(num='.')
    return list(reader)
def get_vcf_records_by_ids(vcf_records):
    """Index VCF records by their ID field (a later duplicate id wins)."""
    by_id = {}
    for record in vcf_records:
        by_id[record.ID] = record
    return by_id
def get_nas_from_lumpy_vcf_file(lumpy_vcf_file):
    """Read a Lumpy VCF file and convert its records to novel adjacencies."""
    records = get_vcf_records_from_file(vcf_file_name=lumpy_vcf_file)
    return get_nas_from_lumpy_vcf_records(lumpy_vcf_records=records)
def update_nas_ids(nas_by_ids_defaultdict, setup):
    """Finalize external adjacency ids and flatten the id -> adjacencies map.

    SVs that produced several adjacencies get a ``_<n>`` counter appended;
    an optional configured suffix is appended (unless already present) when
    APPEND_ID is enabled; colons are replaced by underscores. Returns a dict
    mapping each final external id to its adjacency.
    """
    flattened = {}
    suffix = setup.get(ID_SUFFIX, "")
    append_suffix = setup.get(APPEND_ID, True)
    for sv_id, adjacencies in nas_by_ids_defaultdict.items():
        if len(adjacencies) > 1:
            for counter, adjacency in enumerate(adjacencies, start=1):
                adjacency.extra[EXTERNAL_NA_ID] += "_{cnt}".format(cnt=counter)
        if append_suffix:
            for adjacency in adjacencies:
                if not adjacency.extra[EXTERNAL_NA_ID].endswith(suffix):
                    adjacency.extra[EXTERNAL_NA_ID] += "_{suffix}".format(suffix=suffix)
        for adjacency in adjacencies:
            adjacency.extra[EXTERNAL_NA_ID] = adjacency.extra[EXTERNAL_NA_ID].replace(":", "_")
            flattened[adjacency.extra[EXTERNAL_NA_ID]] = adjacency
    return flattened
PASS_FILTER_STR = "PASS"
def get_string_vcf_filter(record):
    """Render a record's FILTER field as a string, mapping empty to "PASS".

    A list FILTER is comma-joined; anything else is stringified as-is.
    """
    raw_filter = record.FILTER
    if isinstance(raw_filter, list):
        filter_string = ",".join(str(entry) for entry in raw_filter)
    else:
        filter_string = str(raw_filter)
    return filter_string if filter_string else PASS_FILTER_STR
class StandardizedSVType(Enum):
    # Caller-agnostic SV classification produced by get_standardize_sv_type.
    INS = "INS"  # insertion
    DEL = "DEL"  # deletion
    DUP = "DUP"  # duplication (intra-chromosomal, (-,+) strand pattern)
    INV = "INV"  # inversion (intra-chromosomal, equal strands)
    TRA = "TRA"  # translocation (inter-chromosomal)
def get_standardize_sv_type(adjacency: Adjacency):
    """Classify an adjacency into a caller-agnostic StandardizedSVType.

    Resolution order: inter-chromosomal -> TRA; (-,+) strand pattern -> DUP;
    equal strands -> INV; otherwise INS vs DEL is decided from (in order)
    the original caller SVTYPE ("or_svtype"), REF/ALT sequence lengths, and
    finally the sign of SVLEN.

    NOTE(review): an intra-chromosomal (+,-) adjacency carrying none of the
    or_svtype / ref+alt / svlen hints falls through every branch and
    implicitly returns None — confirm callers never hit this case.
    """
    # generic for all callers
    if adjacency.position1.chromosome != adjacency.position2.chromosome:
        return StandardizedSVType.TRA
    strands = adjacency.position1.strand, adjacency.position2.strand
    # generic for all callers
    if strands == (Strand.REVERSE, Strand.FORWARD):
        return StandardizedSVType.DUP
    # generic for all callers
    if strands[0] == strands[1]:
        return StandardizedSVType.INV
    # Case-insensitive view of the extra data for the INS/DEL heuristics below
    lower_extra = {key.lower(): value for key, value in adjacency.extra.items()}
    if "or_svtype" in lower_extra:
        ###
        # covers Sniffles, PBSV, Delly, ...
        ###
        if "ins" in lower_extra["or_svtype"].lower():
            return StandardizedSVType.INS
        ###
        # covers Sniffles, PBSV, Delly, ...
        ###
        if "del" in lower_extra["or_svtype"].lower():
            return StandardizedSVType.DEL
    ###
    # more generic approach if both REF and ALT fields are present in the adjacency data
    ###
    if "alt" in lower_extra and "ref" in lower_extra and all(map(lambda e: str(e).isalpha() and str(e) != "None", lower_extra["alt"])) and lower_extra["ref"].isalpha():
        if isinstance(lower_extra["alt"], list):
            alt = lower_extra["alt"][0]
        else:
            alt = lower_extra["alt"]
        # longer ALT than REF implies inserted sequence; otherwise deletion
        if len(alt) > len(lower_extra["ref"]):
            return StandardizedSVType.INS
        else:
            return StandardizedSVType.DEL
    ###
    # last resort based on positive/negative svlen
    ###
    if SVLEN.lower() in lower_extra:
        if isinstance(lower_extra[SVLEN.lower()], list):
            length = int(float(lower_extra[SVLEN.lower()][0]))
        else:
            length = int(float(lower_extra[SVLEN.lower()]))
        if length > 0:
            return StandardizedSVType.INS
        return StandardizedSVType.DEL
def update_adjacencies_svtype(adjacencies):
    """Lower-case every adjacency's extra keys and standardize its SVTYPE.

    The caller-reported svtype (if present) is preserved under "or_svtype"
    before the standardized value overwrites the svtype entry.
    """
    original_type_key = "or_svtype"
    for adjacency in adjacencies:
        lowered = {key.lower(): value for key, value in adjacency.extra.items()}
        if SVTYPE.lower() in lowered:
            lowered[original_type_key] = lowered["svtype"]
        adjacency.extra = lowered
        adjacency.extra[SVTYPE.lower()] = get_standardize_sv_type(adjacency=adjacency).value
# INFO keys whose content is already captured by the adjacency's positions.
DUPLICATED_ENTRIES_EXTRA = {
    "CHR2",
    "END",
    "STRANDS"
}
def clear_duplicated_entries_extra(extra):
    """Remove (in place, case-insensitively) extra keys that duplicate
    positional information, and return the mutated dict."""
    duplicated_keys = [key for key in extra if key.upper() in DUPLICATED_ENTRIES_EXTRA]
    for key in duplicated_keys:
        del extra[key]
    return extra
def get_nas_from_lumpy_vcf_records(lumpy_vcf_records, setup=None):
    """Convert Lumpy VCF records into RCK novel adjacencies.

    DUP/DEL records yield one intra-chromosomal adjacency; BND records are
    paired with their MATEID partner into one adjacency; INV records yield
    two reciprocal adjacencies. Ids are finalized via update_nas_ids and
    SVTYPE entries are standardized afterwards.

    :param lumpy_vcf_records: iterable of PyVCF records from a Lumpy VCF
    :param setup: optional dict of parsing flags (e.g. STRIP_CHR, APPEND_ID)
    :return: list of Adjacency objects
    :raises ValueError: on ids that do not follow Lumpy's naming scheme
    :raises Exception: on an unknown SVTYPE
    """
    if setup is None:
        setup = {}
    records_by_ids = get_vcf_records_by_ids(vcf_records=lumpy_vcf_records)
    nas_by_ids = defaultdict(list)
    processed_vcf_ids = set()
    for record in records_by_ids.values():
        # Lumpy BND mates share the id prefix before the first "_"
        base_sv_id = str(record.ID).split("_")[0]
        if base_sv_id in processed_vcf_ids:
            continue
        sv_type = record.INFO["SVTYPE"]
        extra = {}
        extra[VCF_FILTER] = get_string_vcf_filter(record=record)
        if sv_type in ["DUP", "DEL", "BND"]:
            if sv_type in ["DUP", "DEL"]:
                # same-chromosome calls must have a plain (mate-less) id
                if "_" in str(record.ID):
                    raise ValueError("Non standard id {sv_id} for a same-chromosome SV of type {sv_type}".format(sv_id=record.ID, sv_type=sv_type))
                extra.update(record.INFO)
                chr1, chr2 = get_dup_na_chrs_from_vcf_record(vcf_record=record)
                if sv_type == "DUP":
                    strand1, strand2 = get_dup_na_strands_from_vcf_record(vcf_record=record)
                    coord1, coord2 = get_dup_na_coords_from_vcf_record(vcf_record=record)
                else:
                    strand1, strand2 = get_del_na_strands_from_vcf_record(vcf_record=record)
                    coord1, coord2 = get_del_na_coords_from_vcf_record(vcf_record=record)
                add_record_ref_alt_to_extra(sv_type, record, extra)
            else:
                # BND: pair this record with its mate via MATEID
                if "_" not in str(record.ID):
                    raise ValueError("Non standard id {sv_id} for a BND SV".format(sv_id=record.ID))
                extra.update(record.INFO)
                chr1 = str(record.CHROM)
                coord1 = int(record.POS)
                mate_vcf_entry_id_list = record.INFO["MATEID"]
                mate_vcf_entry_id = mate_vcf_entry_id_list[0]
                mate_record = records_by_ids[mate_vcf_entry_id]
                extra.update(mate_record.INFO)
                extra[VCF_FILTER2] = get_string_vcf_filter(record=mate_record)
                # each end's strand comes from the *other* record's ALT breakend
                strand1 = get_strand_from_alt_breakend(vcf_recrod=mate_record)
                strand2 = get_strand_from_alt_breakend(vcf_recrod=record)
                chr2 = str(mate_record.CHROM)
                coord2 = int(mate_record.POS)
            if setup.get(STRIP_CHR, True):
                chr1 = strip_chr(chr_string=chr1)
                chr2 = strip_chr(chr_string=chr2)
            pos1 = Position(chromosome=chr1, coordinate=coord1, strand=strand1)
            pos2 = Position(chromosome=chr2, coordinate=coord2, strand=strand2)
            extra[EXTERNAL_NA_ID] = base_sv_id
            extra = clear_duplicated_entries_extra(extra=extra)
            na = Adjacency(position1=pos1, position2=pos2, extra=extra)
            nas_by_ids[base_sv_id].append(na)
            processed_vcf_ids.add(base_sv_id)
        elif sv_type == "INV":
            if "_" in str(record.ID):
                raise Exception("Non standard id {sv_id} for a same-chromosome SV of type {sv_type}".format(sv_id=record.ID, sv_type=sv_type))
            # an inversion becomes two reciprocal adjacencies, each with its
            # own copy of the record's INFO
            extra1 = deepcopy(record.INFO)
            extra1[VCF_FILTER] = get_string_vcf_filter(record=record)
            extra2 = deepcopy(record.INFO)
            extra2[VCF_FILTER] = get_string_vcf_filter(record=record)
            chr11, chr12 = get_inv_na1_chrs_from_vcf_record(vcf_record=record)
            chr21, chr22 = get_inv_na2_chrs_from_vcf_record(vcf_record=record)
            coord11, coord12 = get_inv_na1_coords_from_vcf_record(vcf_record=record)
            coord21, coord22 = get_inv_na2_coords_from_vcf_record(vcf_record=record)
            strand11, strand12 = get_inv_na1_strands_from_vcf_record(vcf_record=record)
            strand21, strand22 = get_inv_na2_strands_from_vcf_record(vcf_record=record)
            if setup.get(STRIP_CHR, True):
                chr11 = strip_chr(chr_string=chr11)
                chr12 = strip_chr(chr_string=chr12)
                chr21 = strip_chr(chr_string=chr21)
                chr22 = strip_chr(chr_string=chr22)
            pos11 = Position(chromosome=chr11, coordinate=coord11, strand=strand11)
            pos12 = Position(chromosome=chr12, coordinate=coord12, strand=strand12)
            pos21 = Position(chromosome=chr21, coordinate=coord21, strand=strand21)
            pos22 = Position(chromosome=chr22, coordinate=coord22, strand=strand22)
            extra1[EXTERNAL_NA_ID] = base_sv_id
            extra2[EXTERNAL_NA_ID] = base_sv_id
            extra1 = clear_duplicated_entries_extra(extra=extra1)
            extra2 = clear_duplicated_entries_extra(extra=extra2)
            na1 = Adjacency(position1=pos11, position2=pos12, extra=extra1)
            na2 = Adjacency(position1=pos21, position2=pos22, extra=extra2)
            nas_by_ids[base_sv_id].append(na1)
            nas_by_ids[base_sv_id].append(na2)
            processed_vcf_ids.add(base_sv_id)
        else:
            raise Exception("Unknown SVTYPE {sv_type}".format(sv_type=sv_type))
    nas_by_ids = update_nas_ids(nas_by_ids_defaultdict=nas_by_ids, setup=setup)
    update_adjacencies_svtype(adjacencies=nas_by_ids.values())
    return list(nas_by_ids.values())
def get_nas_from_longranger_vcf_records(longranger_vcf_records, setup=None):
    """Convert 10x LongRanger VCF records into RCK novel adjacencies.

    Mated (BND-style) records — recognized by an id with more than two
    "_"-separated components, which must agree with the presence of MATEID —
    are paired with their mates; INV records yield two reciprocal
    adjacencies; DEL/DUP yield one; UNK records are skipped. Ids are
    finalized via update_nas_ids.

    :param longranger_vcf_records: iterable of PyVCF records
    :param setup: optional dict of parsing flags (e.g. STRIP_CHR, APPEND_ID)
    :return: list of Adjacency objects
    :raises Exception: on an unknown SV type
    """
    if setup is None:
        setup = {}
    records_by_ids = get_vcf_records_by_ids(vcf_records=longranger_vcf_records)
    nas_by_ids = defaultdict(list)
    processed_vcf_ids = set()
    for record in records_by_ids.values():
        sv_id_entries = str(record.ID).split("_")
        sv_id = str(record.ID)
        if sv_id in processed_vcf_ids:
            continue
        extra = deepcopy(record.INFO)
        extra[VCF_FILTER] = get_string_vcf_filter(record=record)
        sv_type = record.INFO["SVTYPE"]
        # mated records carry a trailing "_<n>" component in their id
        if len(sv_id_entries) <= 2:
            mate_present = False
        else:
            mate_present = True
        assert mate_present == ("MATEID" in record.INFO)
        if mate_present:
            # base id is the mate-shared prefix (trailing component dropped)
            base_sv_id = "_".join(sv_id_entries[:-1])
            for mate_vcf_id in record.INFO["MATEID"]:
                mate_record = records_by_ids[mate_vcf_id]
                extra.update(deepcopy(mate_record.INFO))
                extra[VCF_FILTER2] = get_string_vcf_filter(record=mate_record)
                # NOTE(review): each end's strand is read from the *other*
                # record's ALT breakend; the local variable names look
                # swapped relative to their content — confirm intent.
                mate_breakend = record.ALT[0]
                record_breakend = mate_record.ALT[0]
                chr1 = str(record.CHROM)
                chr2 = str(mate_record.CHROM)
                strand1 = Strand.from_pm_string(get_strand_string_from_alt_breakend(record_breakend))
                strand2 = Strand.from_pm_string(get_strand_string_from_alt_breakend(mate_breakend))
                coord1 = int(record.POS)
                coord2 = int(mate_record.POS)
                if setup.get(STRIP_CHR, True):
                    chr1 = strip_chr(chr_string=chr1)
                    chr2 = strip_chr(chr_string=chr2)
                pos1 = Position(chromosome=chr1, coordinate=coord1, strand=strand1)
                pos2 = Position(chromosome=chr2, coordinate=coord2, strand=strand2)
                extra[EXTERNAL_NA_ID] = base_sv_id
                na = Adjacency(position1=pos1, position2=pos2, extra=extra)
                nas_by_ids[base_sv_id].append(na)
                processed_vcf_ids.add(mate_vcf_id)
                processed_vcf_ids.add(sv_id)
        else:
            if sv_type == "UNK":
                # unknown-type calls are skipped entirely
                continue
            base_sv_id = "_".join(sv_id_entries)
            if sv_type == "INV":
                # an inversion becomes two reciprocal adjacencies
                chr11, chr12 = get_inv_na1_chrs_from_vcf_record(vcf_record=record)
                chr21, chr22 = get_inv_na2_chrs_from_vcf_record(vcf_record=record)
                coord11, coord12 = get_inv_na1_coords_from_vcf_record(vcf_record=record)
                coord21, coord22 = get_inv_na2_coords_from_vcf_record(vcf_record=record)
                strand11, strand12 = get_inv_na1_strands_from_vcf_record(vcf_record=record)
                strand21, strand22 = get_inv_na2_strands_from_vcf_record(vcf_record=record)
                if setup.get(STRIP_CHR, True):
                    chr11 = strip_chr(chr_string=chr11)
                    chr12 = strip_chr(chr_string=chr12)
                    chr21 = strip_chr(chr_string=chr21)
                    chr22 = strip_chr(chr_string=chr22)
                pos11 = Position(chromosome=chr11, coordinate=coord11, strand=strand11)
                pos12 = Position(chromosome=chr12, coordinate=coord12, strand=strand12)
                pos21 = Position(chromosome=chr21, coordinate=coord21, strand=strand21)
                pos22 = Position(chromosome=chr22, coordinate=coord22, strand=strand22)
                extra[EXTERNAL_NA_ID] = base_sv_id
                extra1 = deepcopy(extra)
                extra2 = deepcopy(extra)
                na1 = Adjacency(position1=pos11, position2=pos12, extra=extra1)
                na2 = Adjacency(position1=pos21, position2=pos22, extra=extra2)
                nas_by_ids[base_sv_id].append(na1)
                nas_by_ids[base_sv_id].append(na2)
                processed_vcf_ids.add(base_sv_id)
            elif sv_type in ["DEL", "DUP"]:
                chr1 = str(record.CHROM)
                if setup.get(STRIP_CHR, True):
                    chr1 = strip_chr(chr_string=chr1)
                chr2 = chr1
                coord1 = int(record.POS)
                coord2 = int(record.INFO["END"])
                if sv_type == "DUP":
                    # duplication: reverse -> forward orientation
                    strand1 = Strand.REVERSE
                    strand2 = Strand.FORWARD
                else:
                    # deletion: forward -> reverse orientation
                    strand1 = Strand.FORWARD
                    strand2 = Strand.REVERSE
                extra[EXTERNAL_NA_ID] = base_sv_id
                pos1 = Position(chromosome=chr1, coordinate=coord1, strand=strand1)
                pos2 = Position(chromosome=chr2, coordinate=coord2, strand=strand2)
                add_record_ref_alt_to_extra(sv_type, record, extra)
                na = Adjacency(position1=pos1, position2=pos2, extra=extra)
                nas_by_ids[base_sv_id].append(na)
                processed_vcf_ids.add(base_sv_id)
            else:
                raise Exception("Unknown SV type ({svtype}) for longranger".format(svtype=sv_type))
    nas_by_ids = update_nas_ids(nas_by_ids_defaultdict=nas_by_ids, setup=setup)
    return list(nas_by_ids.values())
def add_record_ref_alt_to_extra(sv_type, record, extra):
    """Stash REF/ALT sequences into *extra* for insertion/deletion-like calls.

    Applies only when the SV type mentions ins/del (case-insensitively) and
    the ALT entries are sequence objects (expose a ``sequence`` attribute).
    """
    type_lower = sv_type.lower()
    looks_like_indel = "ins" in type_lower or "del" in type_lower
    if looks_like_indel and hasattr(record.ALT[0], "sequence"):
        extra["ALT"] = [alt.sequence for alt in record.ALT]
        if len(record.REF) > 0:
            extra["REF"] = record.REF
def get_nas_from_manta_vcf_records(manta_vcf_records, setup=None):
nas_by_ids = defaultdict(list)
if setup is None:
setup = {}
records_by_ids = get_vcf_records_by_ids(vcf_records=manta_vcf_records)
processed_ids = set()
for record in records_by_ids.values():
extra = deepcopy(record.INFO)
extra[VCF_FILTER] = get_string_vcf_filter(record=record)
svtype = record.INFO["SVTYPE"]
record_id = str(record.ID)
if record_id in processed_ids:
continue
if svtype == "BND":
mate_vcf_id = record.INFO["MATEID"][0]
if mate_vcf_id not in records_by_ids:
continue
mate_record = records_by_ids[mate_vcf_id]
chr1 = str(record.CHROM)
chr2 = str(mate_record.CHROM)
coord1 = int(record.POS)
coord2 = int(mate_record.POS)
strand1 = Strand.from_pm_string(string=get_strand_string_from_alt_breakend(alt_breakend=mate_record.ALT))
strand2 = Strand.from_pm_string(string=get_strand_string_from_alt_breakend(alt_breakend=record.ALT))
if setup.get(STRIP_CHR, True):
chr1, chr2 = strip_chr(chr_string=chr1), strip_chr(chr_string=chr2)
pos1 = Position(chromosome=chr1, coordinate=coord1, strand=strand1)
pos2 = Position(chromosome=chr2, coordinate=coord2, strand=strand2)
extra[EXTERNAL_NA_ID] = str(record.ID)
extra[VCF_FILTER2] = get_string_vcf_filter(record=mate_record)
extra[EXTERNAL_NA_ID2] = mate_vcf_id
extra | |
from math import ceil
from os import (
urandom,
)
from os.path import (
dirname,
join as ojoin,
realpath,
)
from eth_account.account import (
Account,
)
from eth_account.hdaccount.deterministic import (
PRIVATE,
bip32_ckd,
bip32_deserialize,
bip32_master_key,
bip32_privtopub,
)
from eth_account.hdaccount.mnemonic import (
entropy_to_words,
mnemonic_to_seed,
)
from eth_account.hdaccount.utils import (
decompress,
)
from eth_account.signers.base import (
BaseAccount,
)
from eth_keys import (
KeyAPI,
)
# Path to the bundled BIP39 english wordlist (used by createAccount by default).
default_wordlist = ojoin(dirname(realpath(__file__)),
                         "hdaccount/wordlist/bip39_english.txt")
class HDAccount(BaseAccount):
    '''
    Manage BIP32 hierarchical-deterministic (HD) accounts for Ethereum.

    Create a fresh account with createAccount(...), restore one from a
    mnemonic or seed with initAccount(...), and derive children with
    deriveChild(...) or derivePath(...).
    '''
def __init__(self, encoded_key="", path=None):
    '''
    Initialize the HD account and its child-account generator.

    If no arguments are specified, create a new account with
    createAccount(...) or initialize an account from a mnemonic and an
    optional password with initAccount(...).

    :param str encoded_key : (OPTIONAL) BIP32 serialized key
    :param path            : (OPTIONAL) derivation path; good to have but not
                             necessary, and only relevant together with
                             encoded_key
    :type path             : list as [idx_0, ..., idx_n] or str as either
                             "idx_0/.../idx_n" or "m/idx_0/.../idx_n"
    :raises TypeError      : on wrongly typed path or encoded_key
    :raises ValueError     : on non-integer path elements or a key that is
                             not BIP32-serialized
    '''
    # Magic number for hardened key derivation (see BIP32)
    self._const_hardened = 0x80000000
    # Derivation path; filled automatically while deriving children
    self._path = []
    # BUGFIX: the default used to be the mutable literal `path=[]`
    # (shared across calls); None with an explicit fallback is equivalent
    # for every caller and removes the pitfall.
    if path is None:
        path = []
    if isinstance(path, list):
        for elem in path:
            # Raises ValueError if an element is not a base-10 number
            self._path.append(int(elem))
    elif isinstance(path, str):
        self._path = self.decodePath(path)
    else:
        raise TypeError("path has to be a list or a string")
    # Holds either an encoded private key (hardened derivation possible)
    # or a public key
    if not isinstance(encoded_key, str):
        raise TypeError("Encoded Key has to be a string")
    # Before assigning the key, check that it is BIP32-serialized
    if len(encoded_key) != 0:
        try:
            bip32_deserialize(encoded_key)
        except Exception as e:
            raise ValueError("encoded_key malformed: Not in bip32 serialized format.\n"
                             "Additional informations: %s" % e)
    self.__key = encoded_key
    # Start the child-account generator and prime it
    self.__accgen = self._accountGenerator()
    self.__accgen.send(None)
def _accountGenerator(self, cid=0):
    '''
    Generator that derives all desired child keys; internal use only
    (interact with it via self.__accgen, i.e. through deriveChild).

    Send None to derive the child at the last index + 1, a non-negative
    int to derive the child at exactly that index, or -1 to move the
    current index into the hardened range before deriving.

    :param int cid: first child index; leave empty to start at 0
    '''
    # Holds the most recently derived child object; yielded to the caller
    newacc = None
    curindex = cid
    while True:
        cid = yield newacc
        # cid will be type and value checked. If it is greater zero,
        # the next child index will be cid. If cid is const_magic_hardened,
        # the current index will be incremented by the hardened child
        # derivation constant
        if cid is not None:
            if not isinstance(cid, int):
                raise TypeError("Invalid child index type. Excepted int")
            # else
            if cid >= 0:
                curindex = cid
            else:
                # cid == -1 means that we increase the old index by the
                # hardened constant
                if cid != -1:
                    raise ValueError("Invalid child index %d" % cid)
                # else
                if curindex < self._const_hardened:
                    curindex += self._const_hardened
        # Derive child. Will throw an error if hardened derivation is choosen
        # and only a pubkey is present
        newpath = self._path.copy()
        newpath.append(curindex)
        newacc = HDAccount(bip32_ckd(self.__key, curindex), newpath)
        # increment index and yield new HDAccount object
        curindex += 1
def deriveChild(self, cid=None, hardened=False):
    '''
    Derive a child account through the internal account generator.

    :param int cid      : (OPTIONAL) child index; leave empty to use the
                          previous index + 1
    :param bool hardened: (OPTIONAL) if True, 0x80000000 is added to cid,
                          which leads to hardened key derivation
    :returns            : the derived child account
    :rtype HDAccount
    :raises TypeError   : on invalid argument types
    :raises ValueError  : on a negative child index
    '''
    # Validate argument types up front; sending a bad value into the
    # generator would otherwise terminate it.
    # BUGFIX: error messages used to read "Excepted" instead of "Expected".
    if cid is not None and not isinstance(cid, int):
        raise TypeError("Expected integer or None as index")
    if not isinstance(hardened, bool):
        raise TypeError("Expected bool for hardened")
    if isinstance(cid, int) and hardened is True and not cid >= self._const_hardened:
        cid += self._const_hardened
    if cid is not None and cid < 0:
        raise ValueError("Negative child index not allowed")
    # cid == -1 tells the generator to harden the previous index
    if cid is None and hardened is True:
        cid = -1
    return self.__accgen.send(cid)
def derivePath(self, path):
    '''
    Walk a full derivation path and return the resulting account.

    :param path : derivation path, formatted either as
                  "(m/)idx_0/.../idx_n" or [idx_0, ..., idx_n]
    :type path  : str or list
    :returns    : HDAccount object for the desired path
    :rtype HDAccount
    :raises TypeError: if path is neither a list nor a string
    '''
    if isinstance(path, str):
        indices = self.decodePath(path)
    elif isinstance(path, list):
        indices = path
    else:
        raise TypeError("path must be list or str in format (m/)/idx_1/.../idx_n")
    account = self
    for index in indices:
        account = account.deriveChild(index)
    return account
def createAccount(self, password="", ent_bytes=32, wordlist=None):
    '''
    Create a brand-new account from fresh entropy and return its mnemonic.

    :param list password : (OPTIONAL) additional password for the mnemonic
    :param list ent_bytes: (OPTIONAL) entropy bytes for mnemonic generation;
                           has to be in [16, 20, 24, 28, 32]
    :param str wordlist  : (OPTIONAL) path to a wordlist file; defaults to
                           the bundled english list
    :returns             : mnemonic code
    :rtype str
    '''
    # entropy_to_words performs the value check on ent_bytes
    if wordlist is None:
        wordlist = default_wordlist
    with open(wordlist) as handle:
        words = handle.readlines()
    mnemonic = entropy_to_words(urandom(ent_bytes), words)
    self.initAccount(mnemonic, password)
    return " ".join(mnemonic)
def initAccount(self, mnemonic="", password="", seed=b''):
    '''
    Initialize the account from a mnemonic (plus optional password) or,
    alternatively, directly from a seed.

    :param mnemonic     : mnemonic code to derive the master key from
    :type mnemonic      : str or list
    :param str password : (OPTIONAL) password required to successfully
                          derive the master key from the mnemonic
    :param seed         : seed to derive the master key from (alternative)
    :type seed          : str (hex formated) or bytes
    :raises TypeError   : on wrongly typed mnemonic/password/seed
    :raises ValueError  : unless exactly one of mnemonic or seed is given
    '''
    if not isinstance(mnemonic, (str, list)) or not isinstance(password, str):
        raise TypeError("Mnemonic has to be formated as a list or a string "
                        " and password as string")
    if not isinstance(seed, (str, bytes)):
        raise TypeError("Seed has to be a string or bytes")
    if (mnemonic != "" and seed != b'') or (mnemonic == "" and seed == b''):
        raise ValueError("Either a mnemonic or a seed has to be defined")
    if mnemonic != "":
        words = mnemonic if isinstance(mnemonic, str) else " ".join(mnemonic)
        drvseed = mnemonic_to_seed(words.encode("utf-8"), password.encode("utf-8"))
    elif isinstance(seed, bytes):
        drvseed = seed
    else:
        # Hex-string seed, with or without a "0x" prefix -> bytes
        hexpart = seed[2:] if seed.startswith("0x") else seed
        drvseed = int(seed, 16).to_bytes(ceil(len(hexpart) / 2), "big")
    # Derive the BIP32-serialized master key and reset the path to the root
    self.__key = bip32_master_key(drvseed)
    self._path = []
def decodePath(self, path):
    '''
    Convert the string representation of a derivation path into the
    internal list representation.

    Indices suffixed with "h" or "H" are hardened, i.e. incremented by
    0x80000000.

    :param str path     : string representation of the derivation path
    :returns            : list containing the derivation path
    :rtype              : list
    :raises TypeError   : if path is not a string
    :raises ValueError  : if path does not follow the standard notation
    '''
    # BUGFIX: error messages used to read "Excepted" instead of "Expected".
    if not isinstance(path, str):
        raise TypeError("Expected path as string")
    pathlist = path.split("/")
    # Basic (incomplete) checks that the path fits the standard notation
    if not path[0].isdigit():
        if not path[0].lower() == 'm':
            raise ValueError("Expected path in the form m/idx1/idx2/... or"
                             " idx1/idx2/...")
        else:
            if len(pathlist) == 1:
                raise ValueError("Expected path in the form m/idx1/idx2/... or"
                                 " idx1/idx2/...")
    # Drop the leading "m" marker if present
    pathlist = pathlist[1:] if pathlist[0].lower() == 'm' else pathlist
    return [int(elem) if not elem[-1].lower() == 'h' else
            int(elem[:-1]) + self._const_hardened for elem in pathlist]
def removePrivateKey(self):
    '''
    Replace the stored private key with its public counterpart.

    Afterwards only public keys can be derived from this object.
    '''
    if self.__key == "":
        # Nothing to convert for an uninitialized account
        return
    self.__key = bip32_privtopub(self.__key)
@property
def path(self):
'''
Returns the derivation path as a string of the form m/idx1/idx2 if the
current | |
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import fourier as ff
import matplotlib
import warnings
from matplotlib import pyplot as plt
from os.path import isfile
matplotlib.use('Agg')
def warn(*args, **kwargs):
    """Emit a warning message on standard error, prefixed with 'WARNING: '.

    Extra keyword arguments are forwarded to print().
    """
    print('WARNING: ', *args, file=sys.stderr, **kwargs)
def fit_validate_model(model, x: np.array, y: np.array, train_index, val_index, weights: np.array = None):
    """Fit *model* on the training split and predict the validation split.

    :param model: estimator exposing fit(x, y, weights=...) and predict(x)
    :param x: feature array, indexable by train_index/val_index
    :param y: target array, indexable by train_index/val_index
    :param train_index: indices of the training subset
    :param val_index: indices of the validation subset
    :param weights: optional per-sample weights, split like x and y
    :return: (y_val, yhat_val, weights_val); weights_val is None when no
        weights were supplied.
    """
    x_train, x_val = x[train_index], x[val_index]
    y_train, y_val = y[train_index], y[val_index]
    weights_train = weights[train_index] if weights is not None else None
    weights_val = weights[val_index] if weights is not None else None
    model.fit(x_train, y_train, weights=weights_train)
    yhat_val = model.predict(x_val)
    return y_val, yhat_val, weights_val
def get_stratification_labels(data, n_folds):
    """
    Create an array of stratification labels from an array of continuous values
    to be used in a stratified cross-validation splitter.

    The values are ranked and consecutive ranks are binned into groups of
    ``n_folds`` elements; the bin index is the stratification label. If the
    last bin holds fewer than ``n_folds`` elements, it is merged into the
    preceding bin so that every label has enough members for splitting.

    :param data: list or numpy.ndarray
        The input data array.
    :param n_folds: int
        The number of cross-validation folds to be used with the output labels.
    :return: labels, numpy.ndarray
        The array of integer stratification labels (stored as floats).
    """
    # BUGFIX: `isinstance(data, np.ndarray or list)` evaluated to
    # `isinstance(data, np.ndarray)` and wrongly rejected list input that
    # the conversion below explicitly supports.
    assert isinstance(data, (np.ndarray, list)), "data must be of type list or numpy.ndarray"
    if isinstance(data, list):
        data = np.array(data)
    ndata = len(data)
    isort = np.argsort(data)  # indices that would sort the data
    labels = np.empty(ndata)
    labels[isort] = np.arange(ndata)  # rank of each element
    labels = np.floor(labels / n_folds)  # bin ranks into groups of n_folds
    if np.min(np.bincount(labels.astype(int))) < n_folds:  # if the last label is underpopulated, ...
        labels[labels == np.max(labels)] = np.max(
            labels) - 1  # ... merge it into the preceding one
    return labels
def write_results(pars, results: dict):
    """Append one object's Fourier-fit parameters to the output table.

    A header line is written first when the file does not exist yet; the
    column set depends on pars.compute_errors and on whether [Fe/H] and PCA
    feature models are configured.

    :param pars: parameter object providing rootdir, output_param_file,
        compute_errors, feh_model_file, pca_model_file
    :param results: dict of fitted quantities for a single object
    """
    # check if the file already exists:
    newfile = not isfile(os.path.join(pars.rootdir, pars.output_param_file))
    with open(os.path.join(pars.rootdir, pars.output_param_file), 'a') as file:
        if newfile:
            # Write header:
            if pars.compute_errors:
                file.write('# id Nep period totamp A1 A2 A3 A1_e A2_e A3_e phi1 phi2 phi3 '
                           'phi1_e phi2_e phi3_e phi21 phi21_e phi31 phi31_e '
                           'meanmag meanmag_e cost aper phcov phcov2 snr ZPErr Npt order minmax')
            else:
                file.write('# id Nep period totamp A1 A2 A3 phi1 phi2 phi3 phi21 phi31 meanmag cost '
                           'aper phcov phcov2 snr ZPErr Npt order minmax')
            if pars.feh_model_file is not None:
                file.write(' FeH')
                if pars.compute_errors:
                    file.write(' FeH_e')
            if pars.pca_model_file is not None:
                file.write(' E1 E2 E3 E4 E5 E6')
                if pars.compute_errors:
                    file.write(' E1_e E2_e E3_e E4_e E5_e E6_e')
            file.write('\n')
        # ------------------------
        # Data row (column order mirrors the header above)
        if pars.compute_errors:
            file.write(
                "%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f %.3f "
                "%.3f %.3f %.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
                (results['objname'], results['nepoch'], results['period'], results['tamp'],
                 results['A'][0], results['A'][1], results['A'][2],
                 results['A_std'][0], results['A_std'][1], results['A_std'][2],
                 results['Pha'][0], results['Pha'][1], results['Pha'][2],
                 results['Pha_std'][0], results['Pha_std'][1], results['Pha_std'][2],
                 results['phi21'], results['phi21_std'], results['phi31'], results['phi31_std'],
                 results['icept'], results['icept_std'], results['cost'], results['dataset'] + 1,
                 results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
                 results['ndata'], results['forder'], results['minmax']))
        else:
            file.write("%s %4d %.6f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.4f %.4f %.3f "
                       "%.4f %d %.3f %.3f %.1f %.4f %4d %2d %.3f" %
                       (results['objname'], results['nepoch'], results['period'], results['tamp'],
                        results['A'][0], results['A'][1], results['A'][2],
                        results['Pha'][0], results['Pha'][1], results['Pha'][2],
                        results['phi21'], results['phi31'],
                        results['icept'], results['cost'], results['dataset'] + 1,
                        results['phcov'], results['phcov2'], results['snr'], results['totalzperr'],
                        results['ndata'], results['forder'], results['minmax']))
        if pars.feh_model_file is not None:
            file.write(" %.3f" % results['feh'])
            if pars.compute_errors:
                file.write(" %.3f" % results['feh_std'])
        if pars.pca_model_file is not None:
            file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
                       (results['pca_feat'][0], results['pca_feat'][1], results['pca_feat'][2],
                        results['pca_feat'][3], results['pca_feat'][4], results['pca_feat'][5]))
            if pars.compute_errors:
                file.write(" %.6f %.6f %.6f %.6f %.6f %.6f" %
                           (results['pca_feat_std'][0], results['pca_feat_std'][1], results['pca_feat_std'][2],
                            results['pca_feat_std'][3], results['pca_feat_std'][4], results['pca_feat_std'][5]))
        file.write("\n")
def write_merged_datafile(pars, results: dict):
    """Append one object's time-series photometry to the merged data file.

    A header line is written first when the file does not exist yet.
    """
    outpath = os.path.join(pars.rootdir, pars.merged_output_datafile)
    # check if the file already exists:
    newfile = not isfile(outpath)
    with open(outpath, 'a') as outfile:
        if newfile:
            outfile.write('# id time mag mag_err ZP_err\n')
        columns = (np.tile(results['objname'], results['ndata']),
                   results['otime'] + results['otime0'],
                   results['mag'], results['magerr'], results['zperr'])
        np.savetxt(outfile, np.rec.fromarrays(columns), fmt='%s %.6f %.3f %.3f %.3f')
def write_single_datafile(pars, results: dict, phase_ext_neg=0, phase_ext_pos=1.2):
    """Write the phased light curve of one object to '<objname>.dat'.

    Phases are extended beyond [0, 1] by the given margins. When
    pars.fold_double_period is set, a second file phased with twice the
    period is written as '<objname>_2p.dat'.
    """
    datadir = os.path.join(pars.rootdir, pars.output_data_dir)
    phases, mags = extend_phases(results['ph'], results['mag'],
                                 phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
    curve = np.rec.fromarrays((phases, mags), names=('phase', 'kmag'))
    with open(os.path.join(datadir, results['objname'] + '.dat'), 'w') as outfile:
        np.savetxt(outfile, curve, fmt='%f %f')
    if pars.fold_double_period:
        phases2, mags2 = extend_phases(results['ph_2p'], results['mag'],
                                       phase_ext_neg=phase_ext_neg, phase_ext_pos=phase_ext_pos, sort=True)
        curve2 = np.rec.fromarrays((phases2, mags2), names=('phase', 'kmag'))
        with open(os.path.join(datadir, results['objname'] + '_2p.dat'), 'w') as outfile:
            np.savetxt(outfile, curve2, fmt='%f %f')
def write_synthetic_data(pars, results: dict):
    """Save the synthetic (model) light curve, zero-centred on the intercept.

    With pars.gpr_fit set the GPR fit is written (plus augmented
    realizations when pars.n_augment_data is not None); otherwise the
    direct Fourier fit is saved.
    """
    syndir = os.path.join(pars.rootdir, pars.output_syn_dir)
    if pars.gpr_fit:
        gpr_curve = np.rec.fromarrays((results['phase_grid'], results['synmag_gpr'] - results['icept']))
        np.savetxt(os.path.join(syndir, results['objname'] + "_gpr" + pars.syn_suffix + '.dat'),
                   gpr_curve, fmt='%.4f %.4f')
        if pars.n_augment_data is not None:
            # augmented realizations keep the raw (non-centred) GPR column
            augmented = np.hstack((results['phase_grid'].reshape(-1, 1),
                                   (results['synmag_gpr']).reshape(-1, 1),
                                   results['synmag_gpa']))
            np.savetxt(os.path.join(syndir, results['objname'] + "_gpr_aug" + pars.syn_suffix + '.dat'),
                       augmented, fmt='%7.4f ' * (pars.n_augment_data + 2))
    else:
        dff_curve = np.rec.fromarrays((results['phase_grid'], results['syn'] - results['icept']))
        np.savetxt(os.path.join(syndir, results['objname'] + "_dff" + pars.syn_suffix + '.dat'),
                   dff_curve, fmt='%.4f %.4f')
def make_figures(pars, results: dict, constrain_yaxis_range=True,
                 minphase=0, maxphase=1.2, aspect_ratio=0.6, figformat: str = 'png'):
    """Create (and optionally save) the phase-diagram figure(s) for one object.

    The main figure shows the original, sigma-clipped, and model light curves
    (plus the GPR model with its 1-sigma band when ``pars.gpr_fit`` and
    ``pars.plot_gpr`` are set). When ``pars.fold_double_period`` is set, a
    second diagram folded with twice the period is also produced.

    :param pars: parameter object; uses ``rootdir``, ``plot_dir``,
        ``plot_suffix``, ``fourier_from_gpr``, ``gpr_fit``, ``plot_gpr``,
        ``save_figures``, ``waveband`` and ``fold_double_period``
    :param results: per-object results dict (phases, magnitudes, model curves)
    :param constrain_yaxis_range: passed through to ``plotlc``
    :param minphase: lower phase limit of the plotted range
    :param maxphase: upper phase limit of the plotted range
    :param aspect_ratio: figure aspect ratio, passed through to ``plotlc``
    :param figformat: output image format (also used as the file extension)
    """
    # Create phase diagram:
    outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "." + figformat)
    plottitle = results['objname']
    figtext = '$P = {0:.6f}$ , $S/N = {1:d}$'.format(results['period'], int(results['snr']))
    data1 = np.vstack((results['ph_o'], results['mag_o'], results['magerr_o'])).T
    data2 = np.vstack((results['ph'], results['mag'], results['magerr'])).T
    if pars.fourier_from_gpr:
        data3 = np.vstack((results['phase_grid'], results['synmag_gpr'])).T
    else:
        data3 = np.vstack((results['phase_grid'], results['syn'])).T
    if pars.gpr_fit and pars.plot_gpr:
        # Also show the GPR model with its 1-sigma band (filled error region).
        data4 = np.vstack((results['phase_grid'], results['synmag_gpr'], results['sigma_gpr'])).T
        plot_input = (data1, data2, data3, data4)
        fillerr_index = (3,)
        symbols = ('r.', 'b.', 'r-', 'b-')
    else:
        plot_input = (data1, data2, data3)
        fillerr_index = ()
        # BUG FIX: a missing comma ('k.' 'r-') concatenated the last two entries
        # into 'k.r-', leaving only two symbols for the three datasets.
        symbols = ('r.', 'k.', 'r-')
    plotlc(plot_input, symbols=symbols, fillerr_index=fillerr_index, figsave=pars.save_figures, outfile=outfile,
           xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=plottitle,
           constrain_yaxis_range=constrain_yaxis_range, minphase=minphase, maxphase=maxphase,
           aspect_ratio=aspect_ratio, figformat=figformat)
    if pars.fold_double_period:
        # Create phase diagram with double period:
        outfile = os.path.join(pars.rootdir, pars.plot_dir, results['objname'] + pars.plot_suffix + "_2p." + figformat)
        # BUG FIX: the format string only uses the period; the extra arguments
        # (results['forder'], results['dataset'] + 1) were unused but still
        # evaluated, risking a KeyError when those keys are absent.
        figtext = '$2P = {0:.6f}$'.format(results['period'] * 2)
        data1 = np.vstack(
            (results['ph_o_2p'], results['mag_o'], np.sqrt(results['magerr_o'] ** 2 + results['zperr_o'] ** 2))).T
        data2 = np.vstack(
            (results['ph_2p'], results['mag'], np.sqrt(results['magerr'] ** 2 + results['zperr'] ** 2))).T
        plot_input = (data1, data2)
        symbols = ('ro', 'ko')
        plotlc(plot_input, symbols=symbols, fillerr_index=(), figsave=pars.save_figures, outfile=outfile,
               xlabel='phase', ylabel='$' + pars.waveband + '$ [mag.]', figtext=figtext, title=results['objname'],
               constrain_yaxis_range=True, figformat=figformat)
def read_input(fname: str, do_gls=False, known_columns=False):
    """
    Reads the input list file with columns: object ID, [period, [dataset]]
    :param fname: string, the name of the input file
    :param do_gls: boolean, whether to perform GLS on the input time series. If False, the second column of the input
    file must contain the period.
    :param known_columns: boolean; whether the dataset to be used is known. If True, the last column of the input
    file must contain the number of the column.
    :return: ndarray(s) or None(s); 1-d arrays with the obect IDs, periods, and datasets
    """
    # First column always holds the object identifiers.
    column_dtypes = ['|S25']
    if do_gls:
        # No period column; the dataset column (if any) comes second.
        if known_columns:
            use_columns = (0, 1)
            column_dtypes.append('i')
        else:
            use_columns = (0,)
    else:
        # Period column present; dataset column (if any) comes third.
        if known_columns:
            use_columns = (0, 1, 2)
            column_dtypes += ['f8', 'i']
        else:
            use_columns = (0, 1)
            column_dtypes.append('f8')
    table = np.genfromtxt(fname, usecols=use_columns,
                          dtype=column_dtypes, unpack=False, comments='#',
                          filling_values=np.nan, names=True)
    identifiers = table['id'].reshape(-1, ).astype(str)
    periods = None if do_gls else table['period'].reshape(-1, )
    datasets = table['ap'].reshape(-1, ) if known_columns else None
    return identifiers, periods, datasets
def read_lc(lcfile, n_data_cols: int = 1, is_err_col: bool = False, flag_column: bool = False,
            snr_column: bool = False, is_zperr_col: bool = False, missing_values="NaN", invalid_raise=False):
    """Read a light-curve file into a structured numpy array.

    The first column must contain the observation times ('otime'). It is
    followed, for each of the ``n_data_cols`` datasets, by a magnitude column
    and optionally (in this order) magnitude-error, zero-point-error, S/N,
    and flag columns, named 'mag1', 'magerr1', 'zperr1', 'snr1', 'flag1', ...

    :param lcfile: path (or anything accepted by ``np.genfromtxt``)
    :param n_data_cols: number of magnitude datasets in the file (must be > 0)
    :param is_err_col: each magnitude column is followed by its uncertainty
    :param flag_column: each dataset has a (string) quality-flag column
    :param snr_column: each dataset has a signal-to-noise column
    :param is_zperr_col: each dataset has a zero-point-error column
    :param missing_values: token(s) marking missing data
    :param invalid_raise: raise on lines with an inconsistent column count
    :return: structured ndarray restricted to rows with a valid (non-NaN) 'otime'
    """
    # BUG FIX: the assertion message referred to a non-existent parameter
    # name (`n_datasets`); it now names the actual argument.
    assert n_data_cols > 0, "`n_data_cols` must be a positive integer"
    colnames = ['otime']
    dtypes = [float]
    for ii in range(n_data_cols):
        colnames.append('mag' + str(ii + 1))
        dtypes.append(float)
        if is_err_col:
            # The column following each magnitude holds its uncertainty.
            colnames.append('magerr' + str(ii + 1))
            dtypes.append(float)
        if is_zperr_col:
            # Zero-point error column for this dataset.
            colnames.append('zperr' + str(ii + 1))
            dtypes.append(float)
        if snr_column:
            # Signal-to-noise column for this dataset.
            colnames.append('snr' + str(ii + 1))
            dtypes.append(float)
        if flag_column:
            # Quality-flag column (string) for this dataset.
            colnames.append('flag' + str(ii + 1))
            dtypes.append('|S10')
    # One entry per declared column (the manual `ncols` counter was redundant).
    used_cols = list(range(len(colnames)))
    # Read light curve:
    lcdatain = np.genfromtxt(lcfile, unpack=False, comments='#', filling_values=np.nan,
                             dtype=dtypes, usecols=used_cols, missing_values=missing_values,
                             names=colnames, invalid_raise=invalid_raise)
    # BUG FIX: `lcfile + " found."` raised TypeError for path-like or file-like
    # inputs, which np.genfromtxt happily accepts.
    print(f"{lcfile} found.")
    # Drop rows without a valid observation time.
    lcdatain = lcdatain[~np.isnan(lcdatain['otime'])]
    return lcdatain
def degrade_lc(otime, mag, magerr, zperr, period=1.0, remove_points=True, nkeep=50,
| |
s += "N" * (3 - (len(s) % 3) )
segment.mCds.append(cds_fragment)
assert(len(segment.mCds) == len(segment.mFragments))
# sanity check: are all sequence segments the same?
# do not compare boundary residues, as they might be different
# due to alternative splicing.
xset = set()
for x in segment.mFragments:
xset.add(x[1:-1])
# sequence segments might not be identical
# due to different locations of frameshifts within the
# sequences. In this case resolve by aligning
# the segments and providing the consensus alignment as
# sequence
if len(xset) > 1:
muscle = WrapperMuscle.Muscle()
mali = Mali.Mali()
for x in range(len(segment.mFragments)):
mali.addSequence(
segment.mMembers[x], 0, 0, segment.mFragments[x])
aligned = muscle.Run(mali)
pep_consensus = aligned.getConsensus()
if options.loglevel >= 6:
options.stdlog.write(
"# consensus peptide alignment:\n")
aligned.writeToFile(options.stdlog)
options.stdlog.write(pep_consensus + "\n")
# substitute for consensus
segment.mMali = aligned
segment.mFragments = [pep_consensus]
# do the same for the nucleotide fragments
# thread each sequence through the peptide alignment
if segment.mCds:
# muscle = WrapperMuscle.Muscle()
# mali = Mali.Mali()
# for x in range(len(segment.mCds)):
# mali.addSequence( segment.mMembers[x], 0, 0, segment.mCds[x] )
# aligned = muscle.Run( mali )
# cds_consensus = aligned.getConsensus( mark_with_gaps = True )
# if options.loglevel >= 6:
# options.stdlog.write("# consensus cds alignment:\n")
# aligned.writeToFile( options.stdlog )
# options.stdlog.write( cds_consensus + "\n" )
# options.stdlog.flush()
cds_consensus = ["-"] * len(pep_consensus) * 3
# mask inconsistent positions in the consensus peptide
# these are due to frameshifts within an exon
columns = segment.mMali.getColumns()
for c in range(len(columns)):
s = columns[c]
counts = [(a, s.count(a))
for a in set(list(s)).difference(set("-"))]
if len(counts) > 1:
cds_consensus[c * 3:c * 3 + 3] = ["N"] * 3
for c in range(len(segment.mCds)):
if options.loglevel >= 2:
options.stdlog.write(
"# building map between consensus peptide and %s.\n" % segment.mMembers[c])
options.stdlog.flush()
# build map of consensus cds to peptide cds
cons_map_p2c = alignlib_lite.py_makeAlignmentVector(
)
this_cds = segment.mCds[c]
try:
cons_map_p2c = getMapPeptide2Cds(pep_consensus,
this_cds,
options)
except ValueError, msg:
if options.loglevel >= 2:
options.stdlog.write(
"# Warning: sequence %s not mappable: %s\n" % (segment.mMembers[c], msg))
continue
for x in range(cons_map_p2c.getRowFrom(), cons_map_p2c.getRowTo()):
y = cons_map_p2c.mapRowToCol(x)
if y < 0:
continue
if cds_consensus[x] == "-":
cds_consensus[x] = this_cds[y]
cds_consensus = "".join(cds_consensus)
if options.loglevel >= 6:
options.stdlog.write(
"# consensus cds alignment %i, %i:\n" % (len(cds_consensus), len(pep_consensus)))
options.stdlog.write(cds_consensus + "\n")
options.stdlog.flush()
# substitute for consensus
segment.mCds = [cds_consensus]
else:
segment.mMali = None
if len(segment.mFragments) == 0:
continue
segment.mSequence = segment.mFragments[0]
segment.mCdsSequence = segment.mCds[0]
# assert(len(segment.mSequence) == len(segment.mCdsSequence) / 3 )
segments.append(segment)
return segments
def writeToFile(mali, section, options, is_aligned=True):
    """Write a multiple alignment to the file derived from *section*.

    The output filename is ``options.output_filename_pattern % section`` and
    the output format is ``options.output_format``.

    :param mali: multiple alignment object (must provide writeToFile/checkLength)
    :param section: string substituted into the output filename pattern
    :param options: global options object
    :param is_aligned: if True, verify that all entries have the same length
    :raises ValueError: if *is_aligned* is set and entries differ in length
    """
    filename = options.output_filename_pattern % section
    # Context manager guarantees the handle is closed even if writing fails.
    with open(filename, "w") as outfile:
        mali.writeToFile(outfile, format=options.output_format)
    if is_aligned and not mali.checkLength():
        # BUG FIX: raising a plain string is a TypeError on Python >= 2.6;
        # raise a proper exception instead.
        raise ValueError(
            "mali in file %s has entries of different lengths" % filename)
def main(argv=None):
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: align_transcripts.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])
parser.add_option("-m", "--master-identifier", dest="master", type="string",
help="master sequence.")
parser.add_option("-p", "--master-pattern", dest="master_pattern", type="string",
help="master pattern.")
parser.add_option("--master-species", dest="master_species", type="string",
help="species to use as master sequences.")
parser.add_option("-t", "--translate", dest="filename_translation", type="string",
help="filename on where to store translated sequences.")
parser.add_option("-e", "--exons-file", dest="filename_exons", type="string",
help="filename on where to exon information.")
parser.add_option("-g", "--gtf-file", dest="filename_gtf", type="string",
help="filename with exon information in gtf format.")
parser.add_option("-c", "--mark-codons", dest="mark_codons", action="store_true",
help="mark codons.")
parser.add_option("--remove-stops", dest="remove_stops", action="store_true",
help="remove stop codons.")
parser.add_option("--mask-stops", dest="mask_stops", action="store_true",
help="mask stop codons.")
parser.add_option("--mask-char", dest="mask_char", type="string",
help="masking character to use.")
parser.add_option("-f", "--remove-frameshifts", dest="remove_frameshifts", action="store_true",
help="remove columns corresponding to frameshifts.")
parser.add_option("-s", "--split-exons", dest="split_exons", action="store_true",
help="split columns aligned to different exons in the same gene.")
parser.add_option("-a", "--target", dest="target", type="choice",
choices=("paml", ),
help="perform cleaning up for certain targets.")
parser.add_option("--stop-at", dest="stop_at", type="choice",
choices=("aligned", "unaligned", "unpacked"),
help="stop at intermediate stage and dump out that alignment.")
parser.add_option("--force-map", dest="force_map", action="store_true",
help="force mapping of sequences that have changed to previous sequence.")
parser.add_option("--cds-gtf-file", dest="filename_cds", type="string",
help="""filename with cds - useful if you expect pseudogenes in your set. The peptide
sequences will be aligned to the cds sequences. This produces better coordinates.""" )
parser.add_option("--output-section", dest="output", type="choice", action="append",
choices=("final_aa", "final_na", "aligned_aa", "aligned_na",
"all", "unaligned_aa", "unaligned_na", "coords"),
help="which alignment to output: aligned=aligned sequences, but before untangling exons; "
" final=final multiple alignment; unaligned=unaligned sequences; "
" coords=genomic coordinates (corresponding to 'final_na').")
parser.add_option("--output-filename-pattern", dest="output_filename_pattern", type="string",
help="filename pattern for multiple alignment output files. "
" If no --output-section option is given, stdout is used.")
parser.add_option("--output-filename-coords", dest="output_filename_coords", type="string",
help="filename to output coordinates to.")
parser.add_option("--output-format", dest="output_format", type="choice",
choices=("fasta", "stockholm", "clustal", "plain-fasta"),
help="output format of multiple alignments.")
parser.add_option("--strict", dest="strict", action="store_true",
help="apply strict checking.")
parser.set_defaults(
gap_char="-",
mask_char="x",
gap_chars="-.",
separator="|",
master=None,
master_species=None,
filename_translation=None,
filename_exons=None,
filename_gtf=None,
master_pattern=None,
remove_stops=False,
mark_codons=False,
mask_unaligned=False,
split_exons=False,
remove_frameshifts=False,
min_segment_length=5,
mask_stops=False,
target=None,
sequence_separator="",
stop_at=None,
filename_cds=None,
force_map=False,
output_filename_pattern="%s.fasta",
output_filename_coords="coords.tsv.gz",
output=[],
output_format="fasta",
max_percent_gaps=0.1,
max_gaps=40,
strict=False,
)
(options, args) = E.Start(parser)
########################################################
if "all" in options.output:
options.output = ["final_aa", "final_na", "aligned_aa",
"aligned_na", "unaligned_aa", "unaligned_na", "coords"]
########################################################
########################################################
########################################################
# read unaligned sequences
input = Mali.Mali()
input.readFromFile(sys.stdin, format="fasta")
all_identifiers = input.getIdentifiers()
alphabet = input.getAlphabet()
E.info("sequences are of alphabet: %s" % alphabet)
if alphabet == "aa":
coordinate_factor = 3.0
# build list of selenoproteins
selenoproteins = set(
[x.mId for x in input.values() if "U" in x.mString.upper()])
else:
coordinate_factor = 1.0
selenoproteins = ()
########################################################
########################################################
########################################################
# read cds sequences
if options.filename_cds:
cds_sequences = Genomics.ReadPeptideSequences(
open(options.filename_cds, "r"))
map_peptide2cds = {}
nskipped = 0
for x in all_identifiers:
E.debug("building map_peptide2cds for sequence %s" % (x))
if x not in cds_sequences:
nskipped += 1
continue
try:
map_p2c = getMapPeptide2Cds(input[x],
cds_sequences[x],
options)
except ValueError, msg:
E.warn("sequence %s not mappable: %s" % (x, msg))
nskipped += 1
continue
num_peptide_gaps = len(re.sub("[^-]", "", input[x]))
ngaps = map_p2c.getNumGaps(
) - (num_peptide_gaps * 3) - abs(len(input[x] * 3) - len(cds_sequences[x]))
l = map_p2c.getLength()
if float(ngaps) / float(l) > options.max_percent_gaps or ngaps > options.max_gaps:
E.warn("map between cds to peptide has too many gaps: %s: %i gaps out of length %i" % (
x, ngaps, l))
nskipped += 1
continue
map_peptide2cds[x] = map_p2c
if options.loglevel >= 3:
f = alignlib_lite.py_AlignmentFormatEmissions(map_p2c)
options.stdlog.write("# p2c: " + "\t".join(map(str, (x, str(f),
len(input[x]), len(cds_sequences[x])))) + "\n")
E.info("built %i maps between peptides and cds - %i skipped" %
(len(map_peptide2cds),
nskipped))
else:
map_peptide2cds = {}
map_id2gene, map_gene2ids = buildGeneMap(all_identifiers)
########################################################
########################################################
########################################################
# read exon informmation
if options.filename_exons or options.filename_gtf:
# read exon boundaries and keep forward coordinates
if options.filename_exons:
exons = Exons.ReadExonBoundaries(open(options.filename_exons, "r"),
filter=set(all_identifiers),
from_zero=True)
elif options.filename_gtf:
exons = Exons.ReadExonBoundaries(open(options.filename_gtf, "r"),
filter=set(all_identifiers),
format="gtf",
gtf_extract_id=re.compile(
'transcript_id \"(\S+)\";'),
from_zero=True)
########################################################
########################################################
########################################################
# Exon boundaries contain the fields mPeptideFrom and mPeptideTo.
# These fields correspond to the cds coordinates, not peptide coordinates.
# This is no problem, if there are no frameshifts. However, in the presence
# of frameshifts, these two coordinates will be different. Thus, if cds
# sequences are given, the exon boundaries are corrected, such that
# mPeptideFrom and mPeptideTo contain the peptide coordinates and
# mCdsFrom and mCdsTo contain the cds coordinates.
########################################################
########################################################
########################################################
# adjust exon boundaries
if map_peptide2cds:
# if map_peptide2cds is given, pseudogenes can be treated properly
E.info("checking exon boundaries.")
nmissing, ndifferences, nstop_codons, ndeleted_empty, nunmappable = 0, 0, 0, 0, 0
# minimum genomic coordinates and strands for a gene
genome_starts = {}
# maps of cds sequence to genomic coordinates. These are
# zeroed and increasing for both forward and reverse strand
map_cds2genome = {}
for key, ee in exons.items():
if key not in map_peptide2cds:
nmissing += 1
continue
map_p2c = map_peptide2cds[key]
# a patch to eliminate empty last exons
if ee[-1].mPeptideTo == ee[-1].mPeptideFrom and \
ee[-1].mGenomeTo == ee[-1].mGenomeFrom:
E.warn("%s of length %i: deleting empty last exon: %s." %
(key, len(input[key]), str(ee[-1])))
del ee[-1]
ndeleted_empty += 1
if ee[-1].mPeptideTo != map_p2c.getColTo():
E.debug("%s" % str(ee[-1]))
E.warn("%s of length %i: peptide and exon do not correspond: %i != %i" %
(key, len(input[key]), ee[-1].mPeptideTo, map_peptide2cds[key].getColTo()))
ndifferences += 1
d = ee[-1].mPeptideTo - map_peptide2cds[key].getColTo()
if d == 3:
E.warn(
"%s: assuming difference is stop codon - exon shortened.")
nstop_codons += 1
elif d > 0:
E.warn(
"%s: fixing difference of %i nucleotides - incomplete stop-codon?" % (key, d))
else:
# if the exon information | |
= 12098618010582908146005387418068214530897837924954238474768639057877490835545707924234415267192522442378424554055618356812999593976451240454748132615211091
p2 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343420364306790694479071514320422685064042719135179664690266371525865249047670187055110695514824881157627139
q2 = 6947349788273330265284965959588633765145668297542467009935686733076998478802274287263210169428313906535572268083136251282544180080959668222544545924665987
p3 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343421225512127678851876291564787861171689610002001450319286946495752591223718157676932258249173072665300213
q3 = 9266126880388093025412332663804790639778236438889018854356539267369792799981733933428697598363851162957322580350270024369332640344413674817822906997102161
p4 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343421356808531436971239501427225110998678228016324130962852291540962098563998522061844259409194324238072163
q4 = 9346194396330429861097524187193981265347523161493757436812567448933497111978504926263282763464402757659318174531608519618989854444686100976857830087136899
moduli = [p1 * q1, p2 * q2, p3 * q3, p4 * q4]
for i, (p, q) in enumerate(self.implicit.factorize_msb(moduli, p_bitsize + q_bitsize, shared_bitsize)):
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
self.assertEqual(moduli[i], p * q)
p_bitsize = 1024
q_bitsize = 512
shared_bitsize = 684
p1 = 137676848178120053804151859930883725890803026594890273621717986880391033552896124307278203769389114417028688066268898176276364165645879838855204653941267370118703755611397682095578076818071918172477401067278492828257626897251549091543352809233324240524137497086302474085899298902638892888908168338819819232793
q1 = 13166288667078358159532363247770104519199514211373352701434198635956864629466947059508438393840310722732010695913860165840076158141600542903957511858467599
p2 = 155941871148496045943650517403022286219330266513190620694534749227433871940120353353030481603047425408777193957891989215447984590279121382305371103889682866866611645183334486259197241694690077730091496562828758139564286098307121800141566950170972849436331381375112592397181935508950663666559821018117710798361
q2 = 8054287780708269262514472947823359228967255917411384941738106945448488928023325871002415540629545474428145043227927492187948846465762213369395150593287629
p3 = 146542545226083477723264700810318219628590283511298968176573337385538577833243759669492317165475590615268753085678168828004241411544898671318095131587338794716729315057151379325654916607098703691695457183186825995894712193071356602411894624624795802572705076938306979030565015683237625719989339343497095536153
q3 = 8348967325072059612026168622784453891507881426476603640658340020341944731532364677276401286358233081971838597029494396167050440290022806685890808240656759
p4 = 167661072178525609874536869751051800065390422834592103113971975955391615118678036572040576294964853025982786705404563191397770270731849495157247117854529039983840787661878167379723898817843318578402737767598910576316837813336887274651599847119701845895279082627804568462120651226573750359206381471191410662937
q4 = 8145167185335505501783087854760814147233023836090931783403657001079727963955491428876064700621053935085252069162037262941731093071208640285177101456231051
moduli = [p1 * q1, p2 * q2, p3 * q3, p4 * q4]
for i, (p, q) in enumerate(self.implicit.factorize_lsb(moduli, p_bitsize + q_bitsize, shared_bitsize)):
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
self.assertEqual(moduli[i], p * q)
def test_roca(self):
# 39th primorial
M = 962947420735983927056946215901134429196419130606213075415963491270
# These primes are chosen such that a' is pretty small so it doesn't take too long.
p = 85179386137518452231354185509698113331528483782580002217930594759662020757433
q = 121807704694511224555991770528701515984374557330058194205583818929517699002107
n = p * q
p_, q_ = self.roca.factorize(n, M, 5, 6)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_shor(self):
# Examples from the reference paper
p = 1789
q = 1847
n = p * q
p_, q_ = self.shor.factorize(n, 751228, 78)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p = 12343
q = 12391
n = p * q
p_, q_ = self.shor.factorize(n, 2, 4247705)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_twin_primes(self):
p = 4045364040964617981493056570547683620499113851384489798802437290109120991898115799819774088264427282611552038114397865000343325953101387058967136608664301
q = 4045364040964617981493056570547683620499113851384489798802437290109120991898115799819774088264427282611552038114397865000343325953101387058967136608664303
n = p * q
p_, q_ = self.twin_primes.factorize(n)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
class TestGCM(TestCase):
    from gcm import forbidden_attack

    def test_forbidden_attack(self):
        """Nonce reuse in AES-GCM allows recovering H and forging tags."""
        key = randbytes(16)
        iv = randbytes(16)
        # Two messages encrypted under the same key/nonce pair.
        cipher = AES.new(key, AES.MODE_GCM, nonce=iv)
        ad1 = randbytes(16)
        pt1 = randbytes(16)
        cipher.update(ad1)
        ct1, tag1 = cipher.encrypt_and_digest(pt1)
        cipher = AES.new(key, AES.MODE_GCM, nonce=iv)
        ad2 = randbytes(16)
        pt2 = randbytes(16)
        cipher.update(ad2)
        ct2, tag2 = cipher.encrypt_and_digest(pt2)
        # The attack yields candidate authentication keys; one must verify.
        for candidate_h in self.forbidden_attack.recover_possible_auth_keys(ad1, ct1, tag1, ad2, ct2, tag2):
            target_ad = randbytes(16)
            target_ct = randbytes(16)
            forged_tag = self.forbidden_attack.forge_tag(candidate_h, ad1, ct1, tag1, target_ad, target_ct)
            try:
                cipher = AES.new(key, AES.MODE_GCM, nonce=iv)
                cipher.update(target_ad)
                cipher.decrypt_and_verify(target_ct, forged_tag)
                break
            except ValueError:
                # Wrong candidate authentication key; try the next one.
                continue
        else:
            self.fail()
class TestHNP(TestCase):
    from hnp import lattice_attack

    def _dsa(self, p, g, x):
        """Produce one DSA-like signature (h, r, s) together with its nonce."""
        digest = getrandbits(p.bit_length())
        nonce = randint(1, p - 1)
        r = pow(g, nonce, p)
        s = (pow(nonce, -1, p) * (digest + x * r)) % p
        return digest, r, s, nonce

    def test_lattice_attack(self):
        """Recover a DSA private key from partially-known nonces."""
        # Not a safe prime, but it doesn't really matter.
        p = 299182277398782807472682876223275635417
        g = 5
        x = randint(1, p - 1)
        nonce_bitsize = p.bit_length()

        def gather(count, leak):
            # Generate `count` signatures, leaking part of each nonce via `leak`.
            true_nonces, sigs = [], []
            for _ in range(count):
                h, r, s, k = self._dsa(p, g, x)
                true_nonces.append(k)
                sigs.append((h, r, s, leak(k)))
            return true_nonces, sigs

        def check(recovered_x, recovered_nonces, true_nonces):
            # The private key and every nonce must be recovered exactly.
            self.assertIsInstance(recovered_x, int)
            self.assertIsInstance(recovered_nonces, list)
            self.assertEqual(x, recovered_x)
            for true_k, rec_k in zip(true_nonces, recovered_nonces):
                self.assertIsInstance(rec_k, int)
                self.assertEqual(true_k, rec_k)

        # Case 1: most significant bits of each nonce are known.
        msb_known = 7
        nonces, signatures = gather(25, lambda k: k >> (nonce_bitsize - msb_known))
        x_, nonces_ = next(self.lattice_attack.dsa_known_msb(p, signatures, nonce_bitsize, msb_known))
        check(x_, nonces_, nonces)

        # Case 2: least significant bits of each nonce are known.
        lsb_known = 7
        nonces, signatures = gather(25, lambda k: k % (2 ** lsb_known))
        x_, nonces_ = next(self.lattice_attack.dsa_known_lsb(p, signatures, nonce_bitsize, lsb_known))
        check(x_, nonces_, nonces)

        # Case 3: only the middle bits of two nonces are known.
        msb_unknown = 10
        lsb_unknown = 20
        h1, r1, s1, k1 = self._dsa(p, g, x)
        signature1 = (h1, r1, s1, (k1 >> lsb_unknown) % (2 ** (nonce_bitsize - msb_unknown)))
        h2, r2, s2, k2 = self._dsa(p, g, x)
        signature2 = (h2, r2, s2, (k2 >> lsb_unknown) % (2 ** (nonce_bitsize - msb_unknown)))
        x_, k1_, k2_ = self.lattice_attack.dsa_known_middle(p, signature1, signature2, nonce_bitsize, msb_unknown, lsb_unknown)
        self.assertIsInstance(x_, int)
        self.assertIsInstance(k1_, int)
        self.assertIsInstance(k2_, int)
        self.assertEqual(x, x_)
        self.assertEqual(k1, k1_)
        self.assertEqual(k2, k2_)
class TestIGE(TestCase):
    from ige import padding_oracle

    def _encrypt(self, key, p):
        """Encrypt *p* in AES-IGE mode; returns the (p0, c0) IV pair and ciphertext."""
        p0 = randbytes(16)
        c0 = randbytes(16)
        ecb = AES.new(key, mode=AES.MODE_ECB)
        prev_p, prev_c = p0, c0
        ciphertext = bytearray()
        for off in range(0, len(p), 16):
            # IGE: c_i = E(p_i ^ c_{i-1}) ^ p_{i-1}
            block = p[off:off + 16]
            enc = strxor(ecb.encrypt(strxor(block, prev_c)), prev_p)
            prev_p, prev_c = block, enc
            ciphertext += enc
        return p0, c0, ciphertext

    def _valid_padding(self, key, p0, c0, c):
        """Padding oracle: True iff *c* decrypts (IGE) to validly padded data."""
        try:
            ecb = AES.new(key, mode=AES.MODE_ECB)
            prev_p, prev_c = p0, c0
            plaintext = bytearray()
            for off in range(0, len(c), 16):
                # IGE: p_i = D(c_i ^ p_{i-1}) ^ c_{i-1}
                block = c[off:off + 16]
                dec = strxor(ecb.decrypt(strxor(block, prev_p)), prev_c)
                prev_p, prev_c = dec, block
                plaintext += dec
            unpad(plaintext, 16)
            return True
        except ValueError:
            return False

    def test_padding_oracle(self):
        """The padding-oracle attack must recover every plaintext exactly."""
        key = randbytes(16)
        for size in range(1, 17):
            plaintext = pad(randbytes(size), 16)
            p0, c0, ciphertext = self._encrypt(key, plaintext)
            recovered = self.padding_oracle.attack(
                lambda p0, c0, c: self._valid_padding(key, p0, c0, c), p0, c0, ciphertext)
            self.assertEqual(plaintext, recovered)
class Knapsack(TestCase):
    from knapsack import low_density

    def test_low_density(self):
        """Low-density subset-sum attack recovers the selection vector."""
        cases = (
            ([429970831622, 650002882675, 512682138397, 145532365100, 462119415111, 357461497167, 582429951539, 22657777498, 2451348134, 380282710854, 251660920136, 103765486463, 276100153517, 250012242739, 519736909707, 451460714161],
             5398327344820,
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
            ([23449054409, 58564582991, 24739686534, 30995859145, 16274600764, 13384701522, 45782350364, 10685194276, 18864211511, 9594013152, 50215903866, 7952180124, 42094717093, 50866816333, 44318421949, 31143511315],
             42313265920,
             [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]),
        )
        for weights, target_sum, expected in cases:
            solution = self.low_density.attack(weights, target_sum)
            for idx in range(len(weights)):
                self.assertIsInstance(solution[idx], int)
            self.assertEqual(solution, expected)
class LCG(TestCase):
from lcg import parameter_recovery
from lcg import truncated_parameter_recovery
from lcg import truncated_state_recovery
def test_parameter_recovery(self):
modulus = 230565400234205371157763985910524799617
multiplier = 192101630084837332907895369052393213499
increment = 212252940839553091477500231998099191939
state = 182679397636465813399296757573664340382
n_outputs = 10
outputs = []
for _ in range(n_outputs):
state = (multiplier * state + increment) % modulus
outputs.append(state)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, modulus=modulus)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, multiplier=multiplier)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, increment=increment)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, modulus=modulus, multiplier=multiplier)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, modulus=modulus, increment=increment)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, multiplier=multiplier, increment=increment)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
modulus_, multiplier_, increment_ = self.parameter_recovery.attack(outputs, modulus=modulus, multiplier=multiplier, increment=increment)
self.assertIsInstance(modulus_, int)
self.assertEqual(modulus, modulus_)
self.assertIsInstance(multiplier_, int)
self.assertEqual(multiplier, multiplier_)
self.assertIsInstance(increment_, int)
self.assertEqual(increment, increment_)
def test_truncated_parameter_recovery(self):
state_bitsize = 128
output_bitsize = 32
modulus = 236360717458728691963813082060498623380
multiplier = 192101630084837332907895369052393213499
increment = 212252940839553091477500231998099191939
state = 182679397636465813399296757573664340382
n_outputs = 40
# The recovery method is not perfect, so we allow some errors in the generated output.
n_test = 200
max_failures = 5
outputs = []
states = []
for _ in range(n_outputs):
state = (multiplier * state + increment) % modulus
states.append(state)
outputs.append(state >> (state_bitsize - output_bitsize))
modulus_, multiplier_, increment_, seed_ = next(self.truncated_parameter_recovery.attack(outputs, state_bitsize, output_bitsize, state_bitsize))
self.assertIsInstance(modulus_, int)
self.assertIsInstance(multiplier_, int)
self.assertIsInstance(increment_, int)
self.assertIsInstance(seed_, int)
s = state
s_ = seed_
for _ in range(n_outputs):
s_ = (multiplier_ * s_ + increment_) % modulus_
failures = 0
for _ in range(n_test):
s = (multiplier * s + increment) % modulus
s_ = (multiplier_ * s_ + increment_) % modulus_
if (s >> (state_bitsize - output_bitsize)) != (s_ >> (state_bitsize - output_bitsize)):
failures += 1
self.assertLessEqual(failures, max_failures)
modulus_, multiplier_, increment_, seed_ = next(self.truncated_parameter_recovery.attack(outputs, state_bitsize, output_bitsize, state_bitsize, modulus=modulus))
self.assertIsInstance(modulus_, int)
self.assertIsInstance(multiplier_, int)
self.assertIsInstance(increment_, int)
self.assertIsInstance(seed_, int)
s = state
s_ = seed_
for _ in range(n_outputs):
s_ = (multiplier_ * s_ + increment_) % modulus_
failures = 0
for _ in range(n_test):
s = (multiplier * s + increment) % modulus
s_ = (multiplier_ * s_ + increment_) % modulus_
if (s >> (state_bitsize - output_bitsize)) != (s_ >> (state_bitsize - output_bitsize)):
failures += 1
self.assertLessEqual(failures, max_failures)
modulus_, multiplier_, increment_, seed_ = next(self.truncated_parameter_recovery.attack(outputs, state_bitsize, output_bitsize, state_bitsize, multiplier=multiplier))
| |
*/
onTouchStart : $.empty,
/** @type {function (): undefined} */
onTouchMove : $.empty,
/** @type {function (): undefined} */
onTouchEnd : $.empty,
/** @type {function (): undefined} */
onMouseWheel : $.empty
};
// Defaults for the Navigation extra (canvas panning/zooming).
// $extend:false — presumably user-supplied options replace this object
// wholesale rather than being merged; confirm against the option-merge helper.
Options.Navigation = {
  $extend : false,
  // Navigation is off unless explicitly enabled by the user.
  enable : false,
  // "auto" resolves to DOM vs Native handling based on the Label type.
  type : "auto",
  panning : false,
  zooming : false
};
// Defaults for the Controller options: life-cycle hooks invoked by the
// visualization while computing layouts and plotting nodes/edges. All hooks
// default to no-ops; $extend:true merges user handlers over these defaults.
Options.Controller = {
  $extend : true,
  /** Called before a layout computation starts. @type {function (): undefined} */
  onBeforeCompute : $.empty,
  /** Called after a layout computation finishes. @type {function (): undefined} */
  onAfterCompute : $.empty,
  /** Called when a DOM label is first created for a node. @type {function (): undefined} */
  onCreateLabel : $.empty,
  /** Called every time a label is (re)positioned. @type {function (): undefined} */
  onPlaceLabel : $.empty,
  /** Called when an animation/operation completes. @type {function (): undefined} */
  onComplete : $.empty,
  /** Called before each edge is drawn. @type {function (): undefined} */
  onBeforePlotLine : $.empty,
  /** Called after each edge is drawn. @type {function (): undefined} */
  onAfterPlotLine : $.empty,
  /** Called before each node is drawn. @type {function (): undefined} */
  onBeforePlotNode : $.empty,
  /** Called after each node is drawn. @type {function (): undefined} */
  onAfterPlotNode : $.empty,
  // When truthy, graph data is requested on demand — TODO confirm exact
  // semantics against the consuming visualization.
  request : false
};
// Base mixin shared by the Extras classes: binds the instance to its
// visualization, decides whether events are handled through DOM labels or
// the native canvas, and provides label hit-test helpers.
var Events = {
  /**
   * @param {?} className key into viz.config (e.g. "Tips", "Navigation")
   * @param {Object} viz owning visualization
   * @return {undefined}
   */
  initialize : function(className, viz) {
    /** @type {Object} */
    this.viz = viz;
    this.canvas = viz.canvas;
    this.config = viz.config[className];
    this.nodeTypes = viz.fx.nodeTypes;
    var type = this.config.type;
    /** @type {boolean} */
    // "auto" follows the Label type; otherwise honour the explicit setting.
    this.dom = type == "auto" ? viz.config.Label.type != "Native" : type != "Native";
    // Only meaningful in DOM mode; false otherwise.
    this.labelContainer = this.dom && viz.labels.getLabelContainer();
    if (this.isEnabled()) {
      this.initializePost();
    }
  },
  /** Subclass hook, run once when the extra is enabled. @type {function (): undefined} */
  initializePost : $.empty,
  // Subclasses return true to be exposed as viz.<name> (see Extras).
  setAsProperty : $.lambda(false),
  /**
   * @return {?} whether this extra is turned on in its config
   */
  isEnabled : function() {
    return this.config.enable;
  },
  /**
   * Test whether an event originated from a DOM label. With `recurring`
   * (mouseover/out pairs) it also requires the relatedTarget to be the canvas.
   * @param {?} adj raw DOM event
   * @param {?} lab window object
   * @param {boolean} recurring
   * @return {?}
   */
  isLabel : function(adj, lab, recurring) {
    adj = $.event.get(adj, lab);
    var labelContainer = this.labelContainer;
    var target = adj.target || adj.srcElement;
    var related = adj.relatedTarget;
    if (recurring) {
      return related && (related == this.viz.canvas.getCtx().canvas && (!!target && this.isDescendantOf(target, labelContainer)));
    } else {
      return this.isDescendantOf(target, labelContainer);
    }
  },
  /**
   * Walk up the parentNode chain; returns the ancestor of `elem` that sits
   * directly under `par`, or false when `elem` is not inside `par`.
   * @param {HTMLElement} elem
   * @param {?} par
   * @return {?}
   */
  isDescendantOf : function(elem, par) {
    for (;elem && elem.parentNode;) {
      if (elem.parentNode == par) {
        return elem;
      }
      elem = elem.parentNode;
    }
    return false;
  }
};
// No-op defaults for every event hook an Extras class may implement;
// mixed in so the dispatcher can always call on<Event> safely.
var Aspect = {
  /** @type {function (): undefined} */
  onMouseUp : $.empty,
  /** @type {function (): undefined} */
  onMouseDown : $.empty,
  /** @type {function (): undefined} */
  onMouseMove : $.empty,
  /** @type {function (): undefined} */
  onMouseOver : $.empty,
  /** @type {function (): undefined} */
  onMouseOut : $.empty,
  /** @type {function (): undefined} */
  onMouseWheel : $.empty,
  /** @type {function (): undefined} */
  onTouchStart : $.empty,
  /** @type {function (): undefined} */
  onTouchMove : $.empty,
  /** @type {function (): undefined} */
  onTouchEnd : $.empty,
  /** @type {function (): undefined} */
  onTouchCancel : $.empty
};
// Despite its name, this Class is the shared low-level event dispatcher:
// it binds every mouse/touch handler on the canvas element exactly once and
// fans each event out to all registered extras via handleEvent.
var Tips = new Class({
  /**
   * @param {?} viz owning visualization
   * @return {undefined}
   */
  initialize : function(viz) {
    this.viz = viz;
    this.canvas = viz.canvas;
    /** @type {boolean} */
    this.node = false;
    /** @type {boolean} */
    this.edge = false;
    /** @type {Array} */
    this.registeredObjects = [];
    this.attachEvents();
  },
  /**
   * Attach all DOM mouse/touch handlers to the canvas element.
   * @return {undefined}
   */
  attachEvents : function() {
    var element = this.canvas.getElement();
    var that = this;
    // Swallow the context menu so right-clicks can be used as input.
    element.oncontextmenu = $.lambda(false);
    $.addEvents(element, {
      /**
       * @param {Object} e
       * @param {Object} win
       * @return {undefined}
       */
      mouseup : function(e, win) {
        var event = $.event.get(e, win);
        that.handleEvent("MouseUp", e, win, that.makeEventObject(e, win), $.event.isRightClick(event));
      },
      /**
       * @param {Object} e
       * @param {Object} win
       * @return {undefined}
       */
      mousedown : function(e, win) {
        var event = $.event.get(e, win);
        that.handleEvent("MouseDown", e, win, that.makeEventObject(e, win), $.event.isRightClick(event));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      mousemove : function(e, win) {
        that.handleEvent("MouseMove", e, win, that.makeEventObject(e, win));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      mouseover : function(e, win) {
        that.handleEvent("MouseOver", e, win, that.makeEventObject(e, win));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      mouseout : function(e, win) {
        that.handleEvent("MouseOut", e, win, that.makeEventObject(e, win));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      touchstart : function(e, win) {
        that.handleEvent("TouchStart", e, win, that.makeEventObject(e, win));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      touchmove : function(e, win) {
        that.handleEvent("TouchMove", e, win, that.makeEventObject(e, win));
      },
      /**
       * @param {Object} e
       * @param {?} win
       * @return {undefined}
       */
      touchend : function(e, win) {
        that.handleEvent("TouchEnd", e, win, that.makeEventObject(e, win));
      }
    });
    /**
     * Forward wheel events with the normalized wheel delta only.
     * @param {Object} from
     * @param {Object} win
     * @return {undefined}
     */
    var handleMouseWheel = function(from, win) {
      var event = $.event.get(from, win);
      var wheel = $.event.getWheel(event);
      that.handleEvent("MouseWheel", from, win, wheel);
    };
    // Legacy Gecko sniffing: old Firefox only fires DOMMouseScroll, other
    // browsers of that era understand "mousewheel".
    if (!document.getBoxObjectFor && window.mozInnerScreenX == null) {
      $.addEvent(element, "mousewheel", handleMouseWheel);
    } else {
      element.addEventListener("DOMMouseScroll", handleMouseWheel, false);
    }
  },
  /**
   * Register an extra so it receives every canvas event.
   * @param {?} obj object implementing the on<Event> hooks (see Aspect)
   * @return {undefined}
   */
  register : function(obj) {
    this.registeredObjects.push(obj);
  },
  /**
   * Dispatch: first argument is the event name, the rest are forwarded to
   * regs[i]["on" + type] for every registered extra.
   * @return {undefined}
   */
  handleEvent : function() {
    /** @type {Array.<?>} */
    var args = Array.prototype.slice.call(arguments);
    var type = args.shift();
    /** @type {number} */
    var i = 0;
    var regs = this.registeredObjects;
    var l = regs.length;
    for (;i < l;i++) {
      regs[i]["on" + type].apply(regs[i], args);
    }
  },
  /**
   * Build the lazy event descriptor handed to every hook. Hit-testing is
   * deferred until getPos/getNode/getEdge are called and then cached via
   * the *Called flags so each test runs at most once per event.
   * @param {Object} prop raw DOM event
   * @param {?} win window object
   * @return {?}
   */
  makeEventObject : function(prop, win) {
    var that = this;
    var graph = this.viz.graph;
    var fx = this.viz.fx;
    var ntypes = fx.nodeTypes;
    var etypes = fx.edgeTypes;
    return{
      pos : false,
      node : false,
      edge : false,
      contains : false,
      getNodeCalled : false,
      getEdgeCalled : false,
      /**
       * Translate page coordinates into canvas space, compensating the
       * canvas position, centering, translate offsets and zoom factors.
       * @return {?}
       */
      getPos : function() {
        var canvas = that.viz.canvas;
        var $cont = canvas.getSize();
        var cameraPos = canvas.getPos();
        var ox = canvas.translateOffsetX;
        var oy = canvas.translateOffsetY;
        var sx = canvas.scaleOffsetX;
        var sy = canvas.scaleOffsetY;
        var pos = $.event.getPos(prop, win);
        this.pos = {
          x : (pos.x - cameraPos.x - $cont.width / 2 - ox) * 1 / sx,
          y : (pos.y - cameraPos.y - $cont.height / 2 - oy) * 1 / sy
        };
        return this.pos;
      },
      /**
       * Linear hit-test over all nodes; caches the result (also on the
       * dispatcher itself as that.node) and the contains position.
       * @return {?}
       */
      getNode : function() {
        if (this.getNodeCalled) {
          return this.node;
        }
        /** @type {boolean} */
        this.getNodeCalled = true;
        var id;
        for (id in graph.nodes) {
          var n = graph.nodes[id];
          var geom = n && ntypes[n.getData("type")];
          var contains = geom && (geom.contains && geom.contains.call(fx, n, this.getPos()));
          if (contains) {
            this.contains = contains;
            return that.node = this.node = n;
          }
        }
        return that.node = this.node = false;
      },
      /**
       * Hit-test over all edges; the hashset marks visited source ids so
       * each undirected adjacency is only tested once.
       * @return {?}
       */
      getEdge : function() {
        if (this.getEdgeCalled) {
          return this.edge;
        }
        /** @type {boolean} */
        this.getEdgeCalled = true;
        var hashset = {};
        var id;
        for (id in graph.edges) {
          var edgeFrom = graph.edges[id];
          /** @type {boolean} */
          hashset[id] = true;
          var edgeId;
          for (edgeId in edgeFrom) {
            if (edgeId in hashset) {
              continue;
            }
            var e = edgeFrom[edgeId];
            var geom = e && etypes[e.getData("type")];
            var contains = geom && (geom.contains && geom.contains.call(fx, e, this.getPos()));
            if (contains) {
              this.contains = contains;
              return that.edge = this.edge = e;
            }
          }
        }
        return that.edge = this.edge = false;
      },
      /**
       * Position of the last successful node hit; triggers getNode once.
       * @return {?}
       */
      getContains : function() {
        if (this.getNodeCalled) {
          return this.contains;
        }
        this.getNode();
        return this.contains;
      }
    };
  }
});
// Mixin installed on visualizations: creates the shared event dispatcher
// and one instance of each Extras class, registering the enabled ones so
// they receive canvas events.
var Extras = {
  /**
   * @return {undefined}
   */
  initializeExtras : function() {
    // NOTE(review): despite the name, Tips here is the generic event
    // dispatcher defined above, not the tooltip extra itself.
    var doh = new Tips(this);
    var that = this;
    $.each(["NodeStyles", "Tips", "Navigation", "Events"], function(k) {
      var obj = new Extras.Classes[k](k, that);
      if (obj.isEnabled()) {
        doh.register(obj);
      }
      // Extras that opt in are also exposed as viz.nodestyles, viz.tips, etc.
      if (obj.setAsProperty()) {
        that[k.toLowerCase()] = obj;
      }
    });
  }
};
// Registry of Extras implementations, keyed by option-group name.
Extras.Classes = {};
Extras.Classes.Events = new Class({
Implements : [Events, Aspect],
/**
* @return {undefined}
*/
initializePost : function() {
this.fx = this.viz.fx;
this.ntypes = this.viz.fx.nodeTypes;
this.etypes = this.viz.fx.edgeTypes;
/** @type {boolean} */
this.hovered = false;
/** @type {boolean} */
this.pressed = false;
/** @type {boolean} */
this.touched = false;
/** @type {boolean} */
this.touchMoved = false;
/** @type {boolean} */
this.moved = false;
},
setAsProperty : $.lambda(true),
/**
* @param {?} adj
* @param {?} lab
* @param {?} event
* | |
# backend/dataProcess/tool.py
'''
Description:
Writer = "plh"
Date: 2021/8/28
'''
import pandas as pd
import os.path as osp
import os
import numpy as np
import json
import time
import glob
import yaml
def bit():
    # Demo: derive an Ethereum address from a private key using the
    # third-party ``blocksmith`` package (imported lazily, it is only
    # needed for this experiment).
    import blocksmith
    key = '<KEY>'  # placeholder private key — must be replaced with a real 64-hex key
    address = blocksmith.EthereumWallet.generate_address(key)
    print(address)
    # 0x1269645a46a3e86c1a3c3de8447092d90f6f04ed
    checksum_address = blocksmith.EthereumWallet.checksum_address(address)
    print(checksum_address)
    # 0x1269645a46A3e86c1a3C3De8447092D90f6F04ED
def data2number(data, splitStr="/"):
    """Extract the trailing numeric token of every entry in *data*.

    Each entry is split on *splitStr* and its last piece is parsed as a
    float; entries whose last piece is not numeric (e.g. "FinalResult/Error")
    contribute -1 instead.

    :param data: list of "name/.../value" strings
    :param splitStr: separator between the path parts and the value
    :return: list of floats, with -1 marking unparsable entries
    """
    numbers = []
    for entry in data:
        tail = entry.split(splitStr)[-1]
        try:
            numbers.append(float(tail))
        except ValueError:
            numbers.append(-1)
    return numbers
# 和文件读取有关
def list2csv(fileName, list):
    """Dump a 2-D python list to *fileName* as a gbk-encoded CSV
    (a leading index column is written by pandas)."""
    pd.DataFrame(data=list).to_csv(fileName, encoding='gbk')
def getMarixFromCsv(file):
    '''
    Read matrix P back from a CSV produced by list2csv.

    :param file: csv filePath of matrix P
    :return: numpy array with the saved index column stripped
    '''
    frame = pd.read_csv(file)
    # The first column is the row index written at save time — drop it.
    return frame.values[:, 1:]
def addJson(file, key, value):
    '''
    Add (or overwrite) one key-value pair in a JSON file whose top-level
    value is a dict, rewriting the file in place.

    :param file: path to the JSON file
    :param key: key to set
    :param value: JSON-serialisable value
    '''
    with open(file, "r") as fh:
        payload = json.load(fh)
    payload[key] = value
    with open(file, "w") as fh:
        json.dump(payload, fh, indent=4)
def addJson1(file, key1, key2, value):
    """Set data[key1][key2] = value inside a JSON file, creating the
    nested dict for key1 on demand, and rewrite the file."""
    with open(file, "r") as fh:
        payload = json.load(fh)
    payload.setdefault(key1, {})[key2] = value
    with open(file, "w") as fh:
        json.dump(payload, fh, indent=4)
def judgeParametersCal(img_names, json_file):
    '''
    Check whether a parameter set covering all of *img_names* was already
    computed and cached in *json_file*.

    :param img_names: image names that must all be covered
    :param json_file: JSON cache mapping keys to {"imgNames": [...], ...}
    :return: (key, entry) of the first covering record, else (None, None)
    '''
    wanted = set(img_names)
    with open(json_file, "r") as fh:
        records = json.load(fh)
    for key, record in records.items():
        # img_names is covered when |img_names ∩ record| == |img_names|
        if len(wanted.intersection(record["imgNames"])) == len(img_names):
            return key, record
    return None, None
def imageIndices(img_file, json_file, suffix="*.bmp"):
    """Build a {image file name -> index} map for every file in *img_file*
    matching *suffix*, persist it to *json_file*, and return it.

    Bug fix: the original extracted the file name with
    ``img_path.split("\\")[-1]``, which only works with Windows path
    separators; ``os.path.basename`` handles any platform.

    :param img_file: directory containing the images
    :param json_file: destination JSON path for the mapping
    :param suffix: glob pattern selecting the image files
    :return: dict mapping base file names to their enumeration index
    """
    img_paths = glob.glob(osp.join(img_file, suffix))
    res = {osp.basename(img_path): i for i, img_path in enumerate(img_paths)}
    with open(json_file, "w") as f:
        json.dump(res, f, indent=4)
    return res
def getParameters(img_indices, filter_config, json_file):
    """Look up a cached parameter set for (filter_config, img_indices);
    return None when either cache level is missing."""
    with open(json_file, "r") as fh:
        cache = json.load(fh)
    return cache.get(filter_config, {}).get(img_indices)
def getParameterSetsIntersectionFromCsv(img_names, filter_config, filter_method):
    '''Intersect the per-image parameter-set matrices loaded from CSV.

    Original note (translated from Chinese): "failed because of file-import
    problems; creating a new file may solve it."

    NOTE(review): relies on a module-level ``yaml_file`` and on
    ``getIntersectionFromMatrixs`` — neither is defined in this module as
    shown, so calling this raises NameError unless they are provided
    elsewhere; confirm before use.

    :param img_names: image names whose "suc_<name>.csv" matrices are read
    :param filter_config: passed through to *filter_method*
    :param filter_method: callable(matrix, filter_config) -> filtered matrix
    :return: the intersection matrix of all filtered parameter sets
    '''
    with open(yaml_file, 'r') as f:
        cfg = yaml.safe_load(f)
    print(cfg)
    matrices = []
    for img_name in img_names:
        img_file = osp.join(cfg["caseBaseFile"], "parameter/splitData", "suc_" + img_name + ".csv")
        matrix = getMarixFromCsv(img_file)
        matrix = filter_method(matrix, filter_config)
        matrix = matrix[:, 0:-2]  # drop the two trailing result columns
        matrices.append(matrix)
    insection_matrix = getIntersectionFromMatrixs(matrices)
    return insection_matrix
# 和文件读取有关
def imgs2Indices(img_names, img_indices):
    '''
    Translate image names into an underscore-joined index string.

    :param img_names: names to translate, in order
    :param img_indices: mapping of image name -> integer index
    :return: e.g. "0_2" (empty string for an empty name list)
    '''
    return "_".join(str(img_indices[name]) for name in img_names)
def searchCombination(file, value, keyCompare):
    '''
    Scan the combination index file for an entry whose values match
    *value* (as sets) for every key in *keyCompare*.

    :param file: path to the JSON index
    :param value: dict with the candidate combination's fields
    :param keyCompare: list of keys that must match
    :return: (True, matching_key) when found, else (False, [])
    '''
    with open(file, "r") as fh:
        entries = json.load(fh)
    for key, entry in entries.items():
        # Two lists are considered equal when they contain the same set of
        # elements, regardless of order or duplicates.
        if all(set(value[k]) == set(entry[k]) for k in keyCompare):
            return True, key
    return False, []
def mkdir(file):
    '''
    Create directory *file* unless it already exists.

    *file* may be relative (resolved against the current working
    directory) or absolute.
    '''
    if osp.exists(file):
        return
    os.mkdir(file)
def getImgNames(file):
    '''
    Return the names of the files directly inside directory *file*.

    Bug fix: the original reassigned ``imgNames`` for every directory
    yielded by ``os.walk`` and therefore returned the file list of the
    *last* sub-directory visited (and raised NameError for a missing or
    empty walk).  Only the top-level file list is wanted here.

    :param file: directory to list
    :return: list of file names (empty when the directory does not exist)
    '''
    for root, dirs, files in os.walk(file):
        # The first tuple yielded by os.walk describes *file* itself.
        return files
    return []
# 组合数有关
def nFactorial(start, final, step=1):
    '''
    Product of the integers start, start+step, ..., up to and
    including final.

    :param start: first factor
    :param final: last (inclusive) factor
    :param step: stride between consecutive factors
    :return: the running product (1 for an empty range)
    '''
    factors = range(start, final + 1, step)
    product = 1
    for factor in factors:
        product = product * factor
    return product
def calCnm(n, m):
    '''
    Binomial coefficient C(n, m).

    Bug fix: the original computed ``int(a / b)`` with true (float)
    division, which silently loses precision once the intermediate
    factorial products exceed 2**53.  ``math.comb`` is exact integer
    arithmetic and returns the same values for all valid inputs
    (including 0 when m > n).

    :param n: total number of items (the larger number)
    :param m: number of chosen items
    :return: C(n, m) as an exact int
    '''
    from math import comb
    return comb(n, m)
def calCnmSum(n, m):
    '''
    Sum of C(n,1) + C(n,2) + ... + C(n,m).

    Bug fix: computed directly with exact ``math.comb`` instead of going
    through calCnm's original float division, which lost precision for
    large n.

    :param n: total number of items (the larger number)
    :param m: largest choose-count included in the sum
    :return: the exact integer sum
    '''
    from math import comb
    return sum(comb(n, i) for i in range(1, m + 1))
# 组合数有关
# 格式处理
def formatChange(data):
    '''
    Convert each parameter-set list in *data* to a numpy array and strip
    the trailing two result columns.

    :param data: dict
        {
        "imgNames":
            [[],[],...],
        ...
        }
    :return: res: dict mapping the same keys to np.array values with the
        last two columns removed (empty lists become empty arrays)
    '''
    res = data.copy()
    for key, rows in res.items():
        matrix = np.array(rows)
        # Slicing an empty array with [:, :-2] would fail — guard on length.
        res[key] = matrix[:, :-2] if len(rows) != 0 else matrix
    return res
def readTxt(filename, formatChange=None):
    '''
    Read a space-separated text file into [[row], [row], ...], optionally
    transforming each split row with *formatChange*.

    :param filename: path of the text file
    :param formatChange: optional callable applied to each field list
    :return: list of (possibly transformed) rows
    '''
    rows = []
    with open(filename, "r") as fh:
        for raw in fh:
            fields = raw.strip("\n").split(" ")
            if formatChange is not None:
                fields = formatChange(fields)
            rows.append(fields)
    return rows
def readTxtPeremeter(filename, formatChange=None):
    '''
    Read a space-separated text file into a {name: row} dict; the
    *formatChange* callable must split each field list into (name, row).

    NOTE: as in the original, omitting *formatChange* leaves ``name``
    unbound and raises — callers must always pass one.

    :param filename: path of the text file
    :param formatChange: callable(fields) -> (name, row)
    :return: dict keyed by the extracted names
    '''
    res = {}
    with open(filename, "r") as fh:
        for raw in fh:
            fields = raw.strip("\n").split(" ")
            if formatChange is not None:
                name, fields = formatChange(fields)
            res[name] = fields
    return res
# 格式处理
def formatChangeParameter(line):
    """Join the first five fields into one '_'-separated key and return it
    together with the sixth field as the value."""
    key = '_'.join([line[0], line[1], line[2], line[3], line[4]])
    return key, line[5]
def formatChangeGraph(line):
    """Coerce an edge row [src, dst, weight-string] so the weight becomes
    a float."""
    return [line[0], line[1], float(line[2])]
def formatChangeGraph2(graph):
    '''
    Trim the detailed rows returned by getGraph down to their first three
    columns, which is all the clustering step needs.

    :param graph: list of edge rows with extra trailing columns
    :return: list of [src, dst, weight] rows
    '''
    return [[row[0], row[1], row[2]] for row in graph]
def formatChangeGraph3(graph, cluster_matrix=None):
    '''
    Convert getGraph output into the {"links": ..., "nodes": ...} shape
    expected by the force-directed graph view.

    :param graph: rows [src, dst, weight, src_union_count, dst_union_count]
    :param cluster_matrix: optional per-node matrices (numpy) added to nodes
    :return: dict with "links" (weight != 0 only) and "nodes"
    '''
    union_counts = {}
    links = []
    for row in graph:
        # Remember the union count of each endpoint the first time it is seen.
        union_counts.setdefault(row[0], row[3])
        union_counts.setdefault(row[1], row[4])
        if row[2] != 0:
            links.append({"source": row[0], "target": row[1], "unionDec": row[2]})
    nodes = []
    for node_id, count in union_counts.items():
        node = {"id": node_id, "unionNum": count}
        if cluster_matrix is not None:
            node["cluster_matrix"] = cluster_matrix[node_id].tolist()
        nodes.append(node)
    return {"links": links, "nodes": nodes}
def formatChangeGraph4(graph, cluster_res, add_info):
    '''
    Like formatChangeGraph3, but the node list is driven by *cluster_res*
    and every node is enriched with the extra fields from *add_info*.

    :param graph: rows [src, dst, weight, src_union_count, dst_union_count]
    :param cluster_res: iterable of node ids to emit
    :param add_info: per-node dict of extra fields (must contain
        "n_success_filter", used as the node's unionNum)
    :return: dict with "links" (weight != 0 only) and "nodes"
    '''
    seen_counts = {}
    links = []
    for row in graph:
        if row[0] not in seen_counts:
            seen_counts[row[0]] = row[3]
        if row[1] not in seen_counts:
            seen_counts[row[1]] = row[4]
        if row[2] != 0:
            links.append({"source": row[0], "target": row[1], "unionDec": row[2]})
    nodes = []
    for key in cluster_res:
        node = {"id": key, "unionNum": add_info[key]["n_success_filter"]}
        node.update(add_info[key])
        nodes.append(node)
    return {"links": links, "nodes": nodes}
# 和文件命名有关
def randomFileName(prefix="combination", suffix=".json"):
    '''
    Build a pseudo-unique file name from the current unix time
    (second resolution), e.g. "combination1630140000.json".
    '''
    timestamp = round(time.time())
    return "%s%d%s" % (prefix, timestamp, suffix)
def createFilename(prefix, suffix, connector, parameters):
    """Assemble prefix + connector-joined key/value pairs + suffix,
    e.g. ("p", ".txt", "_", {"a": 1}) -> "p_a_1.txt"."""
    pieces = [prefix]
    for key, value in parameters.items():
        pieces.append(str(key))
        pieces.append(str(value))
    return connector.join(pieces) + suffix
# others
def calTotalParameterSets(cfg):
    '''
    Count the total number of parameter combinations described by
    cfg["parameterConfig"], multiplying the step count (both endpoints
    included) of every enabled parameter.

    :param cfg: config dict whose "parameterConfig" maps names to
        {"use": bool, "minValue", "maxValue", "step"}
    :return: total combination count as int
    '''
    total = 1
    scale = 1000  # scale values to integers first to dodge float drift
    for spec in cfg["parameterConfig"].values():
        if spec["use"]:
            steps = (spec["maxValue"] * scale - spec["minValue"] * scale) / (spec["step"] * scale)
            # "+ total" adds the extra 1 per parameter because both range
            # endpoints count as values.
            total = total * steps + total
    return int(total)
def removeBracketList(l):
    '''
    Recursively flatten arbitrarily nested lists into one flat list;
    non-list input is returned unchanged.

    :param l: possibly nested list (or a scalar)
    :return: flat list, or the scalar itself
    '''
    if not isinstance(l, list):
        return l
    flat = []
    for element in l:
        if isinstance(element, list):
            flat += removeBracketList(element)
        else:
            flat.append(element)
    return flat
def list2str(l, connector='_', format=None):
    '''
    Join the items of *l* into a connector-separated string; *format*
    optionally converts each item (defaults to str). Non-list input is
    simply str()-ed.

    :param l: list of items, or a scalar
    :param connector: separator placed between items
    :param format: optional callable item -> str
    :return: joined string
    '''
    if not isinstance(l, list):
        return str(l)
    convert = str if format is None else format
    return connector.join(convert(item) for item in l)
def getIds(l, sort=True):
    '''
    Turn a list of image ids into an underscore-joined string.

    :param l: e.g. [1, 2, 3, 4]
    :param sort: sort ids ascending before joining
    :return: e.g. "1_2_3_4"; an empty input yields [] (quirk preserved
        from the original API)
    '''
    if len(l) == 0:
        return []
    ids = sorted(l) if sort else l.copy()
    return "_".join(str(i) for i in ids)
if __name__ == '__main__':
l=[15,[2,4]]
a = removeBracketList(l)
print(a)
input()
# l = 15
# res = list2str(l)
# print(res)
# input()
# l = [1,2,3]
# l = [[1,2],3]
# l = [[1],2,3]
# new_l = reBracketList(l)
# print(new_l)
# input()
# yaml_file = "../config.yml"
# with open(yaml_file, 'r') as f:
# cfg = yaml.safe_load(f)
# n = calTotalParameterSets(cfg)
# print(n)
# input()
# img_names = ["Image_20210812150340363.bmp",
# "Image_20210812150343338.bmp",
# "Image_20210812150345651.bmp"]
# filter_config ={"minValue":18000,"maxValue":25000}
#
# matrix = getParameterSetsIntersectionFromCsv(img_names, filter_config, fileterSingleMatrix_case1)
# print(matrix)
# input()
filename = "D:\codeTest\parameterExp\data\case1\graph\graph_imgs_0_1_2_3_4_5_6_7_8_9_10_11_12_13_14_15_16_17_18_19_20_21_edge_jac_filter_23000_25000_process_1_0.3.txt"
res = readTxt(filename, formatChangeGraph)
# print(res)
# imgNames = getImgNames("D:\codeTest\parameterExp\data\case1\img")
# print("imgNames",imgNames)
# a = np.array([[]])
# print(np.shape(a))
# print(a)
#
# img_file = "D:\codeTest\parameterExp\data\case1\img"
# json_file = "D:\codeTest\parameterExp\data\case1\combination\img_indices.json"
# res = imageIndices(img_file, json_file)
# a = np.array([[1,2,3],[4,5,6]])
# b = a.tolist()
# bit()
# img_names = ["Image_20210812150340363.bmp",
# "Image_20210812150343338.bmp",
# "Image_20210812150345651.bmp"]
# img_indices = {
# "Image_20210812150340363.bmp": 0,
# "Image_20210812150343338.bmp": 1,
# "Image_20210812150345651.bmp": 2,
# "Image_20210812150348106.bmp": 3,
# "Image_20210812150439515.bmp": 4,
# "Image_20210812150442099.bmp": 5,
| |
# <gh_stars>0
import re
from inspect import isclass
from textwrap import dedent
from scheme import *
from mesh.address import *
from mesh.constants import *
from mesh.endpoint import *
from mesh.exceptions import *
from mesh.util import *
class Configuration(object):
    """A resource configuration scheme.

    Bundles the policy knobs shared by a family of resources: which
    standard endpoints exist, which are created by default, which are
    validated by default, the identifier field, and the base class for
    generated mock controllers.

    :param dict standard_endpoints: Optional; maps endpoint names to callables
        taking a single :class:`Resource` and returning a standard endpoint
        definition for it.
    :param list default_endpoints: Optional; names of standard endpoints to
        include on resources that do not declare ``endpoints`` themselves.
    :param list validated_endpoints: Optional; names of standard endpoints
        subject to validators which don't explicitly specify endpoints.
    :param Field id_field: Optional; the unique identifier
        :class:`scheme.Field` for resources lacking their own. Defaults to a
        non-null Integer named "id".
    :param default_controller: Optional; :class:`Controller` subclass used as
        the base for auto-generated mock controllers.
    """

    def __init__(self, standard_endpoints=None, default_endpoints=None, validated_endpoints=None,
            id_field=None, default_controller=None):
        self.standard_endpoints = standard_endpoints or {}
        self.validated_endpoints = validated_endpoints or []
        self.default_endpoints = default_endpoints or []
        self.default_controller = default_controller or Controller
        self.id_field = id_field or Integer(name='id', nonnull=True)

    def create_controller(self, resource):
        """Manufacture a mock controller subclass bound to *resource*."""
        if not self.default_controller:
            return None
        attrs = {
            'configuration': self,
            'resource': resource,
            'version': (resource.version, 0),
        }
        return type('%sController' % resource.__name__, (self.default_controller,), attrs)
def associate_resource_version(resource):
    """Register *resource* in its family's ``versions`` map.

    Creates the map on the first registration; declaring the same version
    twice raises :exc:`SpecificationError`. Resources without a version
    are ignored.
    """
    version = resource.version
    if version is None:
        return
    try:
        versions = resource.versions
    except AttributeError:
        # First registration for this family: start the map.
        resource.versions = {version: resource}
        return
    if version in versions:
        raise SpecificationError('cannot declare duplicate version of %r' % resource)
    versions[version] = resource
class ResourceMeta(type):
ATTRS = ('abstract', 'composite_key', 'configuration', 'name', 'version')
def __new__(metatype, name, bases, namespace):
asis = namespace.pop('__asis__', False)
if asis:
resource = type.__new__(metatype, name, bases, namespace)
associate_resource_version(resource)
return resource
base_class = None
if namespace.get('abstract', False):
base_class = bases[0]
if len(bases) > 1 or base_class.name is not None:
raise SpecificationError('abstract resource %r may only inherit from a single'
' abstract base resource' % name)
else:
for candidate in bases:
if getattr(candidate, 'abstract', False):
continue
elif base_class is None:
base_class = candidate
else:
raise SpecificationError('concrete resource %r must inherit from only one'
' concrete resource' % name)
if not base_class:
raise SpecificationError('resource %r must inherit from exactly one non-abstract'
' base resource' % name)
configuration = getattr(base_class, 'configuration', None)
if not configuration:
configuration = namespace.get('configuration', None)
if not configuration:
return type.__new__(metatype, name, (base_class,), namespace)
elif not isinstance(configuration, Configuration):
raise SpecificationError('invalid configuration')
schema = namespace.pop('schema', {})
if isclass(schema):
schema = pull_class_dict(schema)
if not isinstance(schema, dict):
raise SpecificationError('resource %r has an invalid schema' % name)
removed_fields = set()
for attr in list(schema):
if isinstance(schema[attr], Field):
schema[attr].name = attr
else:
if schema[attr] is None:
removed_fields.add(attr)
del schema[attr]
requested_endpoints = namespace.pop('endpoints', None)
if isinstance(requested_endpoints, string):
requested_endpoints = requested_endpoints.split(' ')
if requested_endpoints is None:
requested_endpoints = configuration.default_endpoints
declared_endpoints = {}
removed_attrs = set()
for attr in list(namespace):
if attr not in metatype.ATTRS and not attr.startswith('_'):
if isclass(namespace[attr]):
declared_endpoints[attr] = namespace.pop(attr)
elif namespace[attr] is None:
removed_attrs.add(attr)
namespace.pop(attr)
resource = type.__new__(metatype, name, (base_class,), namespace)
if resource.version is not None:
if not (isinstance(resource.version, int) and resource.version >= 1):
raise SpecificationError('resource %r declares an invalid version' % name)
resource.endpoints = {}
resource.schema = {}
resource.validators = {}
inherited_endpoints = set()
for base in reversed(bases):
if hasattr(base, 'schema'):
resource.schema.update(base.schema)
resource.validators.update(base.validators)
for name, endpoint in base.endpoints.items():
inherited_endpoints.add(endpoint)
resource.endpoints[name] = endpoint
resource.schema.update(schema)
for name in removed_fields:
if name in resource.schema:
del resource.schema[name]
id_field = configuration.id_field
if id_field.name in resource.schema:
resource.schema[id_field.name].is_identifier = True
elif id_field.name not in removed_fields:
resource.schema[id_field.name] = id_field.clone(is_identifier=True)
resource.id_field = resource.schema.get(id_field.name)
if isinstance(resource.composite_key, string):
resource.composite_key = resource.composite_key.split(' ')
if resource.composite_key:
for key in resource.composite_key:
if key not in resource.schema:
raise SpecificationError('resource %r declares an invalid composite key' % name)
for name, endpoint in declared_endpoints.items():
resource.endpoints[name] = Endpoint.construct(resource, endpoint)
for attr, value in namespace.items():
if isinstance(value, classmethod):
value = getattr(resource, attr)
if getattr(value, '__validates__', False):
resource.validators[value.__name__] = value
delattr(resource, value.__name__)
resource.description = dedent(resource.__doc__ or '')
if resource.name is None:
associate_resource_version(resource)
return resource
if requested_endpoints:
for name in requested_endpoints:
constructor = configuration.standard_endpoints.get(name)
if constructor:
endpoint = resource.endpoints.get(name)
if endpoint and endpoint in inherited_endpoints and endpoint.auto_constructed:
endpoint = None
if not endpoint:
endpoint = constructor(resource)
if endpoint:
resource.endpoints[name] = endpoint
else:
raise SpecificationError('resource %r requests unknown standard request %r'
% (resource.name, name))
for collection in (resource.endpoints, resource.validators):
for name in list(collection):
if name in removed_attrs:
del collection[name]
for validator in resource.validators.values():
if validator.endpoints is None:
set_function_attr(validator, 'endpoints', configuration.validated_endpoints)
for endpoint_name in validator.endpoints:
if endpoint_name in resource.endpoints:
resource.endpoints[endpoint_name].validators.append(validator)
associate_resource_version(resource)
return resource
def __getattr__(resource, name):
endpoints = type.__getattribute__(resource, 'endpoints')
target = endpoints.get(name)
if target:
get_endpoint = lambda *args: target
else:
candidate = resource.configuration.standard_endpoints.get(name)
if not candidate:
raise AttributeError(name)
elif isinstance(candidate, EndpointConstructor):
get_endpoint = lambda *args: candidate(*args)
else:
get_endpoint = candidate
return type(name, (object,), {
'get_endpoint': staticmethod(get_endpoint),
})
def __getitem__(resource, version):
return resource.versions[version]
def __repr__(resource):
if resource.name:
return '%s(name=%r, version=%r)' % (resource.__name__, resource.name,
resource.version)
else:
return resource.__name__
def __str__(resource):
if resource.name:
return '%s:%d' % (resource.name, resource.version)
else:
return resource.__name__
@property
def maximum_version(resource):
return max(resource.versions.keys())
@property
def minimum_version(resource):
return min(resource.versions.keys())
@property
def title(resource):
chars = []
for char in resource.__name__:
if char.isupper():
chars.append(' ')
chars.append(char)
return ''.join(chars).strip()
def describe(resource, controller=None, address=None, verbose=False, omissions=None):
if address:
address = address.clone(resource=resource.name)
else:
address = Address(resource=resource.name)
description = {
'__subject__': 'resource',
'abstract': resource.abstract,
'classname': resource.__name__,
'composite_key': resource.composite_key,
'controller': None,
'description': resource.description,
'id': None,
'name': resource.name,
'resource': identify_class(resource),
'title': resource.title,
}
if address.bundle:
description['id'] = str(address)
if controller:
description['controller'] = identify_class(controller)
description['version'] = controller.version
else:
description['version'] = (resource.version, 0)
description['schema'] = {}
for name, field in resource.schema.items():
if omissions and name in omissions:
field = Field(name=name)
description['schema'][name] = field.describe(verbose=verbose)
description['endpoints'] = {}
for name, endpoint in resource.endpoints.items():
description['endpoints'][name] = endpoint.describe(address, verbose, omissions)
return description
def enumerate_endpoints(resource, address=None):
if not address:
address = Address()
for name, endpoint in resource.endpoints.items():
yield endpoint.attach(address), endpoint
def filter_schema(resource, all=False, **params):
schema = {}
for name, field in resource.schema.items():
candidate = field.filter(all, **params)
if candidate:
schema[name] = candidate
return schema
def mirror_schema(resource, exclude=None):
    """Return clones of all schema fields, optionally excluding some names.

    ``exclude`` may be a space-separated string or a list of field names.
    """
    if isinstance(exclude, string):
        exclude = exclude.split(' ')
    excluded = exclude or ()
    return dict((name, field.clone())
                for name, field in resource.schema.items()
                if name not in excluded)
def reconstruct(resource, description, configuration=None):
    """Rebuild a concrete Resource subclass from a serialized description.

    ``description`` is the dict produced by ``describe``; ``configuration``
    overrides the resource's own configuration when provided. Raises
    TypeError if neither source supplies a configuration.
    """
    if not (resource.configuration or configuration):
        raise TypeError('cannot reconstruct resource without configuration')
    # '__asis__' presumably tells the metaclass to accept this namespace
    # without re-deriving it -- TODO confirm against ResourceMeta.
    namespace = {
        '__asis__': True,
        'composite_key': description.get('composite_key'),
        'name': description['name'],
        'endpoints': {},
        'schema': {},
        'validators': {},
        'version': description['version'][0]}
    if configuration:
        namespace['configuration'] = configuration
    schema = description.get('schema')
    if isinstance(schema, dict):
        for name, field in schema.items():
            namespace['schema'][name] = Field.reconstruct(field)
    resource = type(str(description['title']), (resource,), namespace)
    resource.id_field = resource.schema.get(resource.configuration.id_field.name)
    endpoints = description.get('endpoints')
    if isinstance(endpoints, dict):
        for name, endpoint in endpoints.items():
            # NOTE(review): this mutates the namespace dict *after* the class
            # was created above; it only reaches the class if the metaclass
            # kept the same 'endpoints' dict object -- confirm.
            namespace['endpoints'][name] = Endpoint.reconstruct(resource, endpoint)
    return resource
@with_metaclass(ResourceMeta)
class Resource(object):
    """A resource definition.

    Concrete subclasses are processed by ResourceMeta (see the decorator);
    the class attributes below are the declaration's raw inputs.
    """
    # Configuration governing this resource; presumably injected elsewhere.
    configuration = None
    # True marks an abstract base that is not exposed directly.
    abstract = False
    # Optional composite identifier specification (None when absent).
    composite_key = None
    # Public name of the resource; None for abstract bases.
    name = None
    # Declared version of this resource.
    version = None
class ControllerMeta(type):
def __new__(metatype, name, bases, namespace):
controller = type.__new__(metatype, name, bases, namespace)
metatype.__metaconstruct__(controller, name, bases, namespace)
return controller
@staticmethod
def __metaconstruct__(controller, name, bases, namespace):
resource = controller.resource
if resource is not None:
version = controller.version
if not (isinstance(resource, type) and issubclass(resource, Resource)):
raise SpecificationError('controller %r specifies a invalid resource' % name)
if not (isinstance(version, tuple) and len(version) == 2 and isinstance(version[0], int)
and version[0] >= 1 and isinstance(version[1], int) and version[1] >= 0):
raise SpecificationError('controller %r declares an invalid version: %r'
% (name, version))
if version[0] in resource.versions:
resource = controller.resource = resource.versions[version[0]]
else:
raise SpecificationError('controller %r specifies an unknown version %r of'
' resource %r' % (name, version[0], resource.name))
elif controller.version is not None:
raise SpecificationError('abstract controller %s must not specify a version' % name)
else:
return controller
controller.endpoints = {}
for endpoint in resource.endpoints.keys():
implementation = getattr(controller, endpoint, None)
if implementation:
controller.endpoints[endpoint] = implementation
versions = getattr(controller, 'versions', None)
| |
image.shape[dimension]. Values beyond 0 and 1 are
possible and even recommended)
:param max_strength: scaling of the intensity gradient. Determines what max(abs(add_gauss)) is going to be
float: fixed value
(float, float): sampled from [max_strength[0], max_strength[1]]
callable: you decide. Will be called as max_strength(image, gauss_add). Do not modify gauss_add.
Must return a scalar.
:param same_for_all_channels: If True, then the same gradient will be applied to all selected color
channels of a sample (see p_per_channel). If False, each selected channel obtains its own random gradient.
:param mean_centered: if True, the brightness addition will be done such that the mean intensity of the image
does not change. So if a bright spot is added, other parts of the image will have something subtracted to keep
the mean intensity the same as it was before
:param p_per_sample:
:param p_per_channel:
:param clip_intensities:
:param data_key:
"""
super().__init__(scale, loc)
self.max_strength = max_strength
self.p_per_sample = p_per_sample
self.p_per_channel = p_per_channel
self.data_key = data_key
self.same_for_all_channels = same_for_all_channels
self.mean_centered = mean_centered
self.clip_intensities = clip_intensities
def __call__(self, **data_dict):
    """Add a localized Gaussian brightness gradient to a batch of images.

    Expects ``data_dict[self.data_key]`` of shape (b, c, *spatial). The
    batch is modified in place and the whole dict is returned.
    NOTE(review): self.clip_intensities is stored in __init__ but never
    applied here -- confirm whether clipping was intended.
    """
    data = data_dict.get(self.data_key)
    assert data is not None, "Could not find data key '%s'" % self.data_key
    b, c, *img_shape = data.shape
    for bi in range(b):
        # Per-sample draw decides whether this sample is augmented at all.
        if np.random.uniform() < self.p_per_sample:
            if self.same_for_all_channels:
                # One kernel shared by all selected channels of this sample.
                kernel = self._generate_kernel(img_shape)
                if self.mean_centered:
                    # first center the mean of the kernel so the overall
                    # image mean is preserved by the addition
                    kernel -= kernel.mean()
                # Guard against division by ~0 for an (almost) flat kernel.
                mx = max(np.max(np.abs(kernel)), 1e-8)
                if not callable(self.max_strength):
                    # Non-callable strengths don't depend on the channel, so
                    # sample once and reuse for every channel.
                    strength = self._get_max_strength(None, None)
                for ci in range(c):
                    if np.random.uniform() < self.p_per_channel:
                        # now rescale so that the maximum value of the kernel is max_strength
                        strength = self._get_max_strength(data[bi, ci], kernel) if callable(
                            self.max_strength) else strength
                        kernel_scaled = np.copy(kernel) / mx * strength
                        data[bi, ci] += kernel_scaled
            else:
                # Independent kernel (and strength) per selected channel.
                for ci in range(c):
                    if np.random.uniform() < self.p_per_channel:
                        kernel = self._generate_kernel(img_shape)
                        if self.mean_centered:
                            kernel -= kernel.mean()
                        mx = max(np.max(np.abs(kernel)), 1e-8)
                        strength = self._get_max_strength(data[bi, ci], kernel)
                        kernel = kernel / mx * strength
                        data[bi, ci] += kernel
    return data_dict
def _get_max_strength(self, image, add_gauss):
    """Resolve ``self.max_strength`` into a concrete scalar.

    A number is returned as-is, a (lo, hi) pair is sampled uniformly, and a
    callable is invoked as ``max_strength(image, add_gauss)``.
    """
    spec = self.max_strength
    if callable(spec):
        return spec(image, add_gauss)
    if isinstance(spec, (int, float)):
        return spec
    if isinstance(spec, (list, tuple)):
        assert len(spec) == 2
        return np.random.uniform(*spec)
    raise RuntimeError()
class LocalGammaTransform(LocalGaussianSomethingTransform):
    """Applies a spatially varying gamma correction to images.

    A Gaussian kernel (from the base class) selects where in the image the
    gamma correction takes full effect; away from the kernel the gamma tends
    to 1 (identity).
    """
    def __init__(self,
                 scale: Union[Tuple[float, float], float, Callable[[Union[Tuple[int, ...], List[int]], int], float]],
                 loc: Union[Tuple[float, float], Callable[[Union[Tuple[int, ...], List[int]], int], float]] = (-1, 2),
                 gamma: Union[float, Tuple[float, float], Callable[[], float]] = (0.5, 1),
                 same_for_all_channels: bool = True,
                 p_per_sample: float = 1.,
                 p_per_channel: float = 1.,
                 data_key: str = "data"):
        """
        This transform is weird and definitely experimental. Use at your own risk.
        Applies gamma correction to the image. The gamma value varies locally using a gaussian kernel.
        The local gamma values are implemented by placing a Gaussian distribution with sigma=scale somewhere in
        (or close to) the image. The location of the kernel is selected independently for each image dimension.
        The location is encoded in % of the image size. The default value of (-1, 2) means that the location will be
        sampled uniformly from (-image.shape[i], 2 * image.shape[i]). It is important to allow the center of the kernel
        to be outside of the image.
        IMPORTANT: Try this with different parametrizations and visualize the outcome to get a better feeling for how
        to use this!
        :param scale: scale of the gradient. Large values recommended!
            float: fixed value
            (float, float): will be sampled independently for each dimension from the interval [scale[0], scale[1]]
            callable: you get all the freedom you want. Will be called as scale(image.shape, dimension) where dimension
            is the index in image.shape we are requesting the scale for. Must return scalar (float).
        :param loc:
            (float, float): sample location uniformly from interval [loc[0], loc[1]] (see main description)
            callable: you get all the freedom you want. Will be called as loc(image.shape, dimension) where dimension
            is the index in image.shape we are requesting the location for. Must return a scalar value denoting a
            relative position along axis dimension (0 for index 0, 1 for image.shape[dimension]. Values beyond 0 and 1
            are possible and even recommended)
        :param gamma: gamma value at the peak of the gaussian distribution.
            float: fixed value
            (float, float): sampled uniformly from [gamma[0], gamma[1]]
            callable: called with no arguments, must return a scalar.
            Recommended: lambda: np.random.uniform(0.01, 1) if np.random.uniform() < 1 else np.random.uniform(1, 3)
            No, this is not a joke. Deal with it.
        :param same_for_all_channels: If True, then the same gradient will be applied to all selected color
            channels of a sample (see p_per_channel). If False, each selected channel obtains its own random gradient.
        :param p_per_sample: probability of augmenting each sample
        :param p_per_channel: probability of augmenting each channel of a selected sample
        :param data_key: key under which the image batch is found in **data_dict
        """
        super().__init__(scale, loc)
        self.gamma = gamma
        self.p_per_sample = p_per_sample
        self.p_per_channel = p_per_channel
        self.data_key = data_key
        self.same_for_all_channels = same_for_all_channels
    def __call__(self, **data_dict):
        """Apply the local gamma gradient to data_dict[self.data_key] in place."""
        data = data_dict.get(self.data_key)
        assert data is not None, "Could not find data key '%s'" % self.data_key
        b, c, *img_shape = data.shape
        for bi in range(b):
            # Per-sample draw decides whether this sample is augmented.
            if np.random.uniform() < self.p_per_sample:
                if self.same_for_all_channels:
                    # One kernel shared by all selected channels of this sample.
                    kernel = self._generate_kernel(img_shape)
                    for ci in range(c):
                        if np.random.uniform() < self.p_per_channel:
                            data[bi, ci] = self._apply_gamma_gradient(data[bi, ci], kernel)
                else:
                    # Independent kernel per selected channel.
                    for ci in range(c):
                        if np.random.uniform() < self.p_per_channel:
                            kernel = self._generate_kernel(img_shape)
                            data[bi, ci] = self._apply_gamma_gradient(data[bi, ci], kernel)
        return data_dict
    def _get_gamma(self):
        # Resolve the gamma spec: scalar -> as-is, (lo, hi) -> uniform sample,
        # callable -> delegate; anything else is a configuration error.
        if isinstance(self.gamma, (int, float)):
            return self.gamma
        elif isinstance(self.gamma, (list, tuple)):
            assert len(self.gamma) == 2
            return np.random.uniform(*self.gamma)
        elif callable(self.gamma):
            return self.gamma()
        else:
            raise RuntimeError()
    def _apply_gamma_gradient(self, img: np.ndarray, kernel: np.ndarray) -> np.ndarray:
        """Gamma-correct ``img`` with a per-pixel gamma derived from ``kernel``."""
        # copy kernel so that we don't modify it out of scope
        kernel = np.copy(kernel)
        # keep the original image range so it can be restored afterwards
        mn, mx = img.min(), img.max()
        # rescale to [0, 1] (epsilon guards against a constant image)
        img = (img - mn) / (max(mx - mn, 1e-8))
        inside_gamma = self._get_gamma()
        outside_gamma = 1
        # prepare kernel by rescaling it to [outside_gamma, inside_gamma]:
        # gamma equals inside_gamma at the kernel peak and 1 elsewhere
        k_min, k_max = kernel.min(), kernel.max()
        kernel = (kernel - k_min) / (max(k_max - k_min, 1e-8))  # [0, 1]
        kernel *= (inside_gamma - outside_gamma)
        kernel += outside_gamma
        # corrected = image ** gamma (element-wise, spatially varying gamma)
        img = np.power(img, kernel)
        # restore original range
        img = img * (mx - mn) + mn
        return img
class LocalSmoothingTransform(LocalGaussianSomethingTransform):
def __init__(self,
             scale: Union[Tuple[float, float], float, Callable[[Union[Tuple[int, ...], List[int]], int], float]],
             loc: Union[Tuple[float, float], Callable[[Union[Tuple[int, ...], List[int]], int], float]] = (-1, 2),
             smoothing_strength: Union[float, Tuple[float, float], Callable[[], float]] = (0.5, 1),
             kernel_size: Union[float, Tuple[float, float], Callable[[], float]] = (0.5, 1),
             same_for_all_channels: bool = True,
             p_per_sample: float = 1.,
             p_per_channel: float = 1.,
             data_key: str = "data"):
    """
    Creates a local blurring of the image. This is achieved by creating a blurred copy of the image and then
    linearly interpolating between the original and smoothed images:
        result = image_orig * (1 - smoothing_strength) + smoothed_image * smoothing_strength
    The interpolation only happens where the local gaussian is placed (defined by scale and loc).
    The strength of smoothing is determined by kernel_size in combination with smoothing_strength. You can set
    smoothing_strength=1 for simplicity.
    :param scale: scale (sigma) of the Gaussian that localizes the effect; float, (min, max) or callable
        (see base class for the callable signature)
    :param loc: relative center of that Gaussian per axis; (min, max) or callable (see base class)
    :param smoothing_strength: interpolation weight of the smoothed image; float, (min, max) or zero-arg callable
    :param kernel_size: sigma of the Gaussian blur itself; float, (min, max) or zero-arg callable
    :param same_for_all_channels: if True, one kernel is shared by all selected channels of a sample
    :param p_per_sample: probability of augmenting each sample
    :param p_per_channel: probability of augmenting each channel of a selected sample
    :param data_key: key under which the image batch is found in **data_dict
    """
    super().__init__(scale, loc)
    self.smoothing_strength = smoothing_strength
    self.same_for_all_channels = same_for_all_channels
    self.p_per_sample = p_per_sample
    self.p_per_channel = p_per_channel
    self.data_key = data_key
    self.kernel_size = kernel_size
def __call__(self, **data_dict):
    """Apply local smoothing to data_dict[self.data_key] (shape (b, c, *spatial))."""
    data = data_dict.get(self.data_key)
    assert data is not None, "Could not find data key '%s'" % self.data_key
    b, c, *img_shape = data.shape
    for bi in range(b):
        # Per-sample draw decides whether this sample is augmented.
        if np.random.uniform() < self.p_per_sample:
            if self.same_for_all_channels:
                # One localization kernel shared by all selected channels.
                kernel = self._generate_kernel(img_shape)
                for ci in range(c):
                    if np.random.uniform() < self.p_per_channel:
                        data[bi, ci] = self._apply_local_smoothing(data[bi, ci], kernel)
            else:
                # Independent kernel per selected channel.
                for ci in range(c):
                    if np.random.uniform() < self.p_per_channel:
                        kernel = self._generate_kernel(img_shape)
                        data[bi, ci] = self._apply_local_smoothing(data[bi, ci], kernel)
    return data_dict
def _get_smoothing(self):
    """Resolve ``self.smoothing_strength`` into a concrete scalar.

    A number is returned as-is, a (lo, hi) pair is sampled uniformly, and a
    callable is invoked with no arguments.
    """
    spec = self.smoothing_strength
    if callable(spec):
        return spec()
    if isinstance(spec, (int, float)):
        return spec
    if isinstance(spec, (list, tuple)):
        assert len(spec) == 2
        return np.random.uniform(*spec)
    raise RuntimeError()
def _get_kernel_size(self):
    """Resolve ``self.kernel_size`` into a concrete blur sigma.

    A number is returned as-is, a (lo, hi) pair is sampled uniformly, and a
    callable is invoked with no arguments.
    """
    spec = self.kernel_size
    if callable(spec):
        return spec()
    if isinstance(spec, (int, float)):
        return spec
    if isinstance(spec, (list, tuple)):
        assert len(spec) == 2
        return np.random.uniform(*spec)
    raise RuntimeError()
def _apply_local_smoothing(self, img: np.ndarray, kernel: np.ndarray) -> np.ndarray:
# copy kernel so that we don't modify it out of scope
kernel = np.copy(kernel)
smoothing = self._get_smoothing()
# prepare kernel by rescaling it to gamma_range
k_min, k_max = kernel.min(), kernel.max()
kernel = (kernel - k_min) / (max(k_max - k_min, 1e-8)) # [0, 1]
kernel *= smoothing
kernel_size = self._get_kernel_size()
img_smoothed = gaussian_filter(img, kernel_size)
return img * | |
<gh_stars>10-100
"""
DeepImageJ
https://deepimagej.github.io/deepimagej/
Conditions of use:
DeepImageJ is an open source software (OSS): you can redistribute it and/or modify it under
the terms of the BSD 2-Clause License.
In addition, we strongly encourage you to include adequate citations and acknowledgments
whenever you present or publish results that are based on it.
DeepImageJ is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
You should have received a copy of the BSD 2-Clause License along with DeepImageJ.
If not, see <https://opensource.org/licenses/bsd-license.php>.
Reference:
DeepImageJ: A user-friendly plugin to run deep learning models in ImageJ
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Submitted 2019.
Bioengineering and Aerospace Engineering Department, Universidad Carlos III de Madrid, Spain
Biomedical Imaging Group, Ecole polytechnique federale de Lausanne (EPFL), Switzerland
Corresponding authors: <EMAIL>, <EMAIL>
Copyright 2019. Universidad Carlos III, Madrid, Spain and EPFL, Lausanne, Switzerland.
"""
import os
import xml.etree.ElementTree as ET
import time
import numpy as np
import urllib
import shutil
from skimage import io
"""
Download the template from this link:
https://raw.githubusercontent.com/esgomezm/python4deepimagej/yaml/yaml/config_template.xml
TensorFlow library is needed. It is imported later to save the model as a SavedModel protobuffer
Try to check TensorFlow version and read DeepImageJ's compatibility requirements.
import tensorflow as tf
tf.__version__
----------------------------------------------------
Example:
----------------------------------------------------
dij_config = DeepImageJConfig(model)
# Update model information
dij_config.Authors = authors
dij_config.Credits = credits
# Add info about the minimum size in case it is not fixed.
pooling_steps = 0
for keras_layer in model.layers:
if keras_layer.name.startswith('max') or "pool" in keras_layer.name:
pooling_steps += 1
dij_config.MinimumSize = np.str(2**(pooling_steps))
# Add the information about the test image
dij_config.add_test_info(test_img, test_prediction, PixelSize)
## Prepare preprocessing file
path_preprocessing = "PercentileNormalization.ijm"
urllib.request.urlretrieve("https://raw.githubusercontent.com/deepimagej/imagej-macros/master/PercentileNormalization.ijm", path_preprocessing )
# Include the info about the preprocessing
dij_config.add_preprocessing(path_preprocessing, "preprocessing")
## Prepare postprocessing file
path_postprocessing = "8bitBinarize.ijm"
urllib.request.urlretrieve("https://raw.githubusercontent.com/deepimagej/imagej-macros/master/8bitBinarize.ijm", path_postprocessing )
# Include the info about the postprocessing
post_processing_name = "postprocessing_LocalMaximaSMLM"
dij_config.add_postprocessing(path_postprocessing, post_processing_name)
## EXPORT THE MODEL
deepimagej_model_path = os.path.join(QC_model_folder, 'deepimagej')
dij_config.export_model(model, deepimagej_model_path)
----------------------------------------------------
Example: change one line in an ImageJ macro
----------------------------------------------------
## Prepare postprocessing file
path_postprocessing = "8bitBinarize.ijm"
urllib.request.urlretrieve("https://raw.githubusercontent.com/deepimagej/imagej-macros/master/8bitBinarize.ijm", path_postprocessing )
# Modify the threshold in the macro to the chosen threshold
ijmacro = open(path_postprocessing, "r")
list_of_lines = ijmacro.readlines()
# Line 21 is the one corresponding to the optimal threshold
list_of_lines[21] = "optimalThreshold = {}\n".format(128)
ijmacro.close()
ijmacro = open(path_postprocessing, "w")
ijmacro.writelines(list_of_lines)
ijmacro.close()
"""
class DeepImageJConfig:
    """Builds and exports the metadata bundle DeepImageJ needs to run a model.

    Collects authorship information, input/output tensor layout, an example
    image, and ImageJ pre-/post-processing macros, then exports the model as
    a TensorFlow SavedModel plus a ``config.xml`` the DeepImageJ plugin
    consumes.

    Fixes over the original:
    - ``np.str``/``np.int`` (aliases removed in NumPy >= 1.20) replaced with
      the equivalent builtins ``str``/``int``.
    - ``tf_model.get_weights()`` is now called without arguments
      (``get_weights(tf_model)`` raised TypeError).
    - The TensorFlow compatibility warning now triggers for any TF >= 2.3
      instead of only version strings starting with exactly "2.3".
    """

    def __init__(self, tf_model):
        # ModelInformation defaults; 'null' is the literal placeholder the
        # DeepImageJ config.xml uses for unset fields.
        self.Name = 'null'
        self.Authors = 'null'
        self.URL = 'null'
        self.Credits = 'null'
        self.Version = 'null'
        self.References = 'null'
        self.Date = time.ctime()
        # Same value as 2**pooling_steps
        # (related to encoder-decoder architectures) when the input size is not
        # fixed
        self.MinimumSize = '8'
        self.get_dimensions(tf_model)
        # Receptive field of the network to process input (used as halo for
        # tiled reconstruction).
        self.Padding = str(self._pixel_half_receptive_field(tf_model))
        self.Preprocessing = list()
        self.Postprocessing = list()
        self.Preprocessing_files = list()
        self.Postprocessing_files = list()

    def get_dimensions(self, tf_model):
        """
        Calculates the array organization and shapes of inputs and outputs.

        Sets FixedPatch, PatchSize, InputOrganization0/OutputOrganization0
        (NHWC vs NCHW), Channels and InputTensorDimensions from the model's
        input_shape/output_shape.
        """
        input_dim = tf_model.input_shape
        output_dim = tf_model.output_shape
        # Deal with the order of the dimensions and whether the size is fixed
        # or not
        if input_dim[2] is None:
            self.FixedPatch = 'false'
            self.PatchSize = self.MinimumSize
            # A None channel axis at the end means channels-first layout.
            if input_dim[-1] is None:
                self.InputOrganization0 = 'NCHW'
                self.Channels = str(input_dim[1])
            else:
                self.InputOrganization0 = 'NHWC'
                self.Channels = str(input_dim[-1])
            if output_dim[-1] is None:
                self.OutputOrganization0 = 'NCHW'
            else:
                self.OutputOrganization0 = 'NHWC'
        else:
            self.FixedPatch = 'true'
            self.PatchSize = str(input_dim[2])
            # With a fixed patch, infer the layout assuming channels are the
            # smallest dimension.
            if input_dim[-1] < input_dim[-2] and input_dim[-1] < input_dim[-3]:
                self.InputOrganization0 = 'NHWC'
                self.Channels = str(input_dim[-1])
            else:
                self.InputOrganization0 = 'NCHW'
                self.Channels = str(input_dim[1])
            if output_dim[-1] < output_dim[-2] and output_dim[-1] < output_dim[-3]:
                self.OutputOrganization0 = 'NHWC'
            else:
                self.OutputOrganization0 = 'NCHW'
        # Adapt the format from brackets to parenthesis, e.g.
        # "(None, None, None, 3)" -> ",-1,-1,-1,3,"
        input_dim = str(input_dim)
        input_dim = input_dim.replace('(', ',')
        input_dim = input_dim.replace(')', ',')
        input_dim = input_dim.replace('None', '-1')
        input_dim = input_dim.replace(' ', "")
        self.InputTensorDimensions = input_dim

    def _pixel_half_receptive_field(self, tf_model):
        """
        The halo is equivalent to the receptive field of one pixel. This value
        is used for image reconstruction when an entire image is processed.

        Feeds an all-zero image and the same image with a single centered
        impulse; output pixels that differ lie inside the impulse's
        receptive field.
        """
        input_shape = tf_model.input_shape
        if self.FixedPatch == 'false':
            min_size = 50 * int(self.MinimumSize)
            if self.InputOrganization0 == 'NHWC':
                null_im = np.zeros((1, min_size, min_size, input_shape[-1]),
                                   dtype=np.float32)
            else:
                null_im = np.zeros((1, input_shape[1], min_size, min_size),
                                   dtype=np.float32)
        else:
            null_im = np.zeros((input_shape[1:]), dtype=np.float32)
            null_im = np.expand_dims(null_im, axis=0)
            min_size = int(self.PatchSize)
        point_im = np.zeros_like(null_im)
        min_size = int(min_size / 2)
        if self.InputOrganization0 == 'NHWC':
            point_im[0, min_size, min_size] = 1
        else:
            point_im[0, :, min_size, min_size] = 1
        result_unit = tf_model.predict(np.concatenate((null_im, point_im)))
        D = np.abs(result_unit[0] - result_unit[1]) > 0
        if self.InputOrganization0 == 'NHWC':
            D = D[:, :, 0]
        else:
            D = D[0, :, :]
        # First affected row/column in the upper-left quadrant gives the halo.
        ind = np.where(D[:min_size, :min_size] == 1)
        halo = np.min(ind[1])
        halo = min_size - halo + 1
        return halo

    class TestImage:
        # Container for the example input/output pair written to config.xml.
        # NOTE(review): __add__ is (ab)used as an initializer by
        # add_test_info; kept for backward compatibility.
        def __add__(self, input_im, output_im, pixel_size):
            """
            pixel size must be given in microns
            """
            self.Input_shape = '{0}x{1}'.format(input_im.shape[0], input_im.shape[1])
            self.InputImage = input_im
            self.Output_shape = '{0}x{1}'.format(output_im.shape[0], output_im.shape[1])
            self.OutputImage = output_im
            self.MemoryPeak = 'null'
            self.Runtime = 'null'
            self.PixelSize = '{0}µmx{1}µm'.format(pixel_size, pixel_size)

    def add_test_info(self, input_im, output_im, pixel_size):
        """Attach the example input/output images used in the exported bundle."""
        self.test_info = self.TestImage()
        self.test_info.__add__(input_im, output_im, pixel_size)

    def add_preprocessing(self, file, name):
        """Register an ImageJ preprocessing macro; the file is copied on export."""
        file_extension = file.split('.')[-1]
        name = name + '.' + file_extension
        # DeepImageJ expects macro names to carry the 'preprocessing' prefix.
        if name.startswith('preprocessing'):
            self.Preprocessing.insert(len(self.Preprocessing), name)
        else:
            name = "preprocessing_" + name
            self.Preprocessing.insert(len(self.Preprocessing), name)
        self.Preprocessing_files.insert(len(self.Preprocessing_files), file)

    def add_postprocessing(self, file, name):
        """Register an ImageJ postprocessing macro; the file is copied on export."""
        file_extension = file.split('.')[-1]
        name = name + '.' + file_extension
        if name.startswith('postprocessing'):
            self.Postprocessing.insert(len(self.Postprocessing), name)
        else:
            name = "postprocessing_" + name
            self.Postprocessing.insert(len(self.Postprocessing), name)
        self.Postprocessing_files.insert(len(self.Postprocessing_files), file)

    def export_model(self, tf_model, deepimagej_model_path, **kwargs):
        """
        Main function to export the model as a bundled model of DeepImageJ
        tf_model: tensorflow/keras model
        deepimagej_model_path: directory where DeepImageJ model is stored.
        """
        # Save the model as protobuffer
        self.save_tensorflow_pb(tf_model, deepimagej_model_path)
        # extract the information about the testing image
        test_info = self.test_info
        io.imsave(os.path.join(deepimagej_model_path, 'exampleImage.tiff'), self.test_info.InputImage)
        io.imsave(os.path.join(deepimagej_model_path, 'resultImage.tiff'), self.test_info.OutputImage)
        print("Example images stored.")
        # write the DeepImageJ configuration as an xml file
        write_config(self, test_info, deepimagej_model_path)
        # Add preprocessing and postprocessing macros.
        # More than one is available, but the first one is set by default.
        for i in range(len(self.Preprocessing)):
            shutil.copy2(self.Preprocessing_files[i], os.path.join(deepimagej_model_path, self.Preprocessing[i]))
            print("ImageJ macro {} included in the bundled model.".format(self.Preprocessing[i]))
        for i in range(len(self.Postprocessing)):
            shutil.copy2(self.Postprocessing_files[i], os.path.join(deepimagej_model_path, self.Postprocessing[i]))
            print("ImageJ macro {} included in the bundled model.".format(self.Postprocessing[i]))
        # Zip the bundled model to download
        shutil.make_archive(deepimagej_model_path, 'zip', deepimagej_model_path)
        print("DeepImageJ model was successfully exported as {0}.zip. You can download and start using it in DeepImageJ.".format(deepimagej_model_path))

    def save_tensorflow_pb(self, tf_model, deepimagej_model_path):
        """Serialize the model as a TF1-style SavedModel into the bundle dir."""
        # Check whether the folder to save the DeepImageJ bundled model exists.
        # If so, it needs to be removed (TensorFlow requirements)
        # -------------- Other definitions -----------
        W = '\033[0m'   # white (normal)
        R = '\033[31m'  # red
        if os.path.exists(deepimagej_model_path):
            print(R + '!! WARNING: DeepImageJ model folder already existed and has been removed !!' + W)
            shutil.rmtree(deepimagej_model_path)
        import tensorflow as tf
        TF_VERSION = tf.__version__
        print("DeepImageJ model will be exported using TensorFlow version {0}".format(TF_VERSION))
        # Warn for any TF >= 2.3 (the original check only matched version
        # strings starting with exactly "2.3", so e.g. 2.4+ went unflagged).
        try:
            incompatible = tuple(int(p) for p in TF_VERSION.split('.')[:2]) >= (2, 3)
        except ValueError:
            incompatible = False
        if incompatible:
            print(R + "DeepImageJ plugin is only compatible with TensorFlow version 1.x, 2.0.0, 2.1.0 and 2.2.0. Later versions are not suported in DeepImageJ." + W)

        def _save_model():
            # tf_version is bound in the enclosing scope before this is called.
            if tf_version == 2:
                """TODO: change it once TF 2.3.0 is available in JAVA"""
                from tensorflow.compat.v1 import saved_model
                from tensorflow.compat.v1.keras.backend import get_session
            else:
                from tensorflow import saved_model
                from keras.backend import get_session
            builder = saved_model.builder.SavedModelBuilder(deepimagej_model_path)
            signature = saved_model.signature_def_utils.predict_signature_def(
                inputs={'input': tf_model.input},
                outputs={'output': tf_model.output})
            signature_def_map = {saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
            builder.add_meta_graph_and_variables(get_session(),
                                                 [saved_model.tag_constants.SERVING],
                                                 signature_def_map=signature_def_map)
            builder.save()
            print("TensorFlow model exported to {0}".format(deepimagej_model_path))

        if TF_VERSION[0] == '1':
            tf_version = 1
            _save_model()
        else:
            tf_version = 2
            """TODO: change it once TF 2.3.0 is available in JAVA"""
            from tensorflow.keras.models import clone_model
            # BUG FIX: Model.get_weights() takes no arguments; the original
            # called tf_model.get_weights(tf_model), which raises TypeError.
            _weights = tf_model.get_weights()
            with tf.Graph().as_default():
                # clone model in new graph and set weights
                _model = clone_model(tf_model)
                _model.set_weights(_weights)
                # NOTE(review): the signature built in _save_model still
                # references tf_model's tensors, not the clone's -- confirm
                # this works in the fresh graph.
                _save_model()
def write_config(Config, TestInfo, config_path):
"""
- Config: Class with all the information about the model's architecture and pre/post-processing
- TestInfo: Metadata of the image provided as an example
- config_path: path to the template of the configuration file.
It can be downloaded from:
https://raw.githubusercontent.com/deepimagej/python4deepimagej/blob/master/xml/config_template.xml
The function updates the fields in the template provided with the
information about the model and the example image.
"""
urllib.request.urlretrieve("https://raw.githubusercontent.com/deepimagej/python4deepimagej/master/xml/config_template.xml", "config_template.xml")
try:
tree = ET.parse('config_template.xml')
root = tree.getroot()
except:
print("config_template.xml not found.")
# WorkCitation-Credits
root[0][0].text = Config.Name
root[0][1].text = Config.Authors
root[0][2].text = Config.URL
root[0][3].text = Config.Credits
root[0][4].text = Config.Version
root[0][5].text = Config.Date
root[0][6].text = Config.References
# ExampleImage
root[1][0].text = TestInfo.Input_shape
root[1][1].text = TestInfo.Output_shape
root[1][2].text = TestInfo.MemoryPeak
root[1][3].text = TestInfo.Runtime
root[1][4].text = TestInfo.PixelSize
# ModelArchitecture
root[2][0].text = 'tf.saved_model.tag_constants.SERVING'
root[2][1].text = 'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY'
root[2][2].text = Config.InputTensorDimensions
root[2][3].text = '1'
root[2][4].text = 'input'
root[2][5].text = Config.InputOrganization0
root[2][6].text = | |
self.match(self.input, 75, self.FOLLOW_75_in_do_while_stmt669)
if self._state.backtracking == 0:
stream_75.add(char_literal69)
self._state.following.append(self.FOLLOW_expr_in_do_while_stmt671)
expr70 = self.expr()
self._state.following.pop()
if self._state.backtracking == 0:
stream_expr.add(expr70.tree)
char_literal71 = self.match(self.input, 76, self.FOLLOW_76_in_do_while_stmt673)
if self._state.backtracking == 0:
stream_76.add(char_literal71)
char_literal72 = self.match(self.input, 92, self.FOLLOW_92_in_do_while_stmt675)
if self._state.backtracking == 0:
stream_92.add(char_literal72)
# AST Rewrite
# elements: expr, block
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
if self._state.backtracking == 0:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 121:3: -> ^( DO_WHILE block expr )
# Expr.g:121:6: ^( DO_WHILE block expr )
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(DO_WHILE, "DO_WHILE")
, root_1)
self._adaptor.addChild(root_1, stream_block.nextTree())
self._adaptor.addChild(root_1, stream_expr.nextTree())
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
if self._state.backtracking == 0:
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "do_while_stmt"
class switch_stmt_return(ParserRuleReturnScope):
    """Return scope for the ``switch_stmt`` rule (ANTLR-generated)."""
    def __init__(self):
        super(ExprParser.switch_stmt_return, self).__init__()
        # AST produced by the rule; populated when parsing completes.
        self.tree = None
# $ANTLR start "switch_stmt"
# Expr.g:124:1: switch_stmt : 'switch' '(' expr ')' case_block -> ^( SWITCH expr case_block ) ;
def switch_stmt(self, ):
    """Parse a ``'switch' '(' expr ')' case_block`` statement.

    ANTLR-generated (Python 2 target). Returns a switch_stmt_return whose
    ``tree`` is the rewritten AST ``^( SWITCH expr case_block )``.
    """
    retval = self.switch_stmt_return()
    retval.start = self.input.LT(1)
    root_0 = None
    # Matched tokens (numbered by the generator).
    string_literal73 = None
    char_literal74 = None
    char_literal76 = None
    # Sub-rule results.
    expr75 = None
    case_block77 = None
    string_literal73_tree = None
    char_literal74_tree = None
    char_literal76_tree = None
    # Rewrite streams buffer matched elements for the AST rewrite below.
    stream_128 = RewriteRuleTokenStream(self._adaptor, "token 128")
    stream_75 = RewriteRuleTokenStream(self._adaptor, "token 75")
    stream_76 = RewriteRuleTokenStream(self._adaptor, "token 76")
    stream_case_block = RewriteRuleSubtreeStream(self._adaptor, "rule case_block")
    stream_expr = RewriteRuleSubtreeStream(self._adaptor, "rule expr")
    try:
        try:
            # Expr.g:125:2: ( 'switch' '(' expr ')' case_block -> ^( SWITCH expr case_block ) )
            # Expr.g:125:4: 'switch' '(' expr ')' case_block
            pass
            # 'switch' keyword is token type 128.
            string_literal73 = self.match(self.input, 128, self.FOLLOW_128_in_switch_stmt698)
            if self._state.backtracking == 0:
                stream_128.add(string_literal73)
            # '(' is token type 75.
            char_literal74 = self.match(self.input, 75, self.FOLLOW_75_in_switch_stmt700)
            if self._state.backtracking == 0:
                stream_75.add(char_literal74)
            self._state.following.append(self.FOLLOW_expr_in_switch_stmt702)
            expr75 = self.expr()
            self._state.following.pop()
            if self._state.backtracking == 0:
                stream_expr.add(expr75.tree)
            # ')' is token type 76.
            char_literal76 = self.match(self.input, 76, self.FOLLOW_76_in_switch_stmt704)
            if self._state.backtracking == 0:
                stream_76.add(char_literal76)
            self._state.following.append(self.FOLLOW_case_block_in_switch_stmt706)
            case_block77 = self.case_block()
            self._state.following.pop()
            if self._state.backtracking == 0:
                stream_case_block.add(case_block77.tree)
            # AST Rewrite
            # elements: case_block, expr
            # token labels:
            # rule labels: retval
            # token list labels:
            # rule list labels:
            # wildcard labels:
            if self._state.backtracking == 0:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                # 126:3: -> ^( SWITCH expr case_block )
                # Expr.g:126:6: ^( SWITCH expr case_block )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(SWITCH, "SWITCH")
                    , root_1)
                self._adaptor.addChild(root_1, stream_expr.nextTree())
                self._adaptor.addChild(root_1, stream_case_block.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            retval.stop = self.input.LT(-1)
            if self._state.backtracking == 0:
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        # Python 2 except syntax: this module targets the Python 2 runtime.
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            # On a recognition error, emit an error node spanning the input.
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
    return retval
# $ANTLR end "switch_stmt"
class case_block_return(ParserRuleReturnScope):
    """Return scope for the ``case_block`` rule (ANTLR-generated)."""
    def __init__(self):
        super(ExprParser.case_block_return, self).__init__()
        # AST produced by the rule; populated when parsing completes.
        self.tree = None
# $ANTLR start "case_block"
# Expr.g:128:1: case_block : '{' ( case_clause )+ ( default_clause )? '}' ;
def case_block(self, ):
    # ANTLR-generated rule method: matches '{' (case_clause)+ (default_clause)? '}'.
    # All tree construction is guarded by `backtracking == 0` so no AST is
    # built while the parser is speculatively backtracking.
    retval = self.case_block_return()
    retval.start = self.input.LT(1)
    root_0 = None
    char_literal78 = None
    char_literal81 = None
    case_clause79 = None
    default_clause80 = None
    char_literal78_tree = None
    char_literal81_tree = None
    try:
        try:
            # Expr.g:129:2: ( '{' ( case_clause )+ ( default_clause )? '}' )
            # Expr.g:129:4: '{' ( case_clause )+ ( default_clause )? '}'
            pass
            root_0 = self._adaptor.nil()
            # Token type 132 is the '{' literal in this grammar's token vocabulary.
            char_literal78 = self.match(self.input, 132, self.FOLLOW_132_in_case_block728)
            if self._state.backtracking == 0:
                char_literal78_tree = self._adaptor.createWithPayload(char_literal78)
                self._adaptor.addChild(root_0, char_literal78_tree)
            # Expr.g:129:8: ( case_clause )+
            # One-or-more loop: token 106 ('case') predicts another case_clause.
            cnt11 = 0
            while True: #loop11
                alt11 = 2
                LA11_0 = self.input.LA(1)
                if (LA11_0 == 106) :
                    alt11 = 1
                if alt11 == 1:
                    # Expr.g:129:9: case_clause
                    pass
                    self._state.following.append(self.FOLLOW_case_clause_in_case_block731)
                    case_clause79 = self.case_clause()
                    self._state.following.pop()
                    if self._state.backtracking == 0:
                        self._adaptor.addChild(root_0, case_clause79.tree)
                else:
                    # At least one case_clause is mandatory; fail otherwise.
                    if cnt11 >= 1:
                        break #loop11
                    if self._state.backtracking > 0:
                        raise BacktrackingFailed
                    eee = EarlyExitException(11, self.input)
                    raise eee
                cnt11 += 1
            # Expr.g:129:23: ( default_clause )?
            # Optional: token 110 ('default') predicts a default_clause.
            alt12 = 2
            LA12_0 = self.input.LA(1)
            if (LA12_0 == 110) :
                alt12 = 1
            if alt12 == 1:
                # Expr.g:129:24: default_clause
                pass
                self._state.following.append(self.FOLLOW_default_clause_in_case_block736)
                default_clause80 = self.default_clause()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    self._adaptor.addChild(root_0, default_clause80.tree)
            # Token type 136 is the '}' literal.
            char_literal81 = self.match(self.input, 136, self.FOLLOW_136_in_case_block740)
            if self._state.backtracking == 0:
                char_literal81_tree = self._adaptor.createWithPayload(char_literal81)
                self._adaptor.addChild(root_0, char_literal81_tree)
            retval.stop = self.input.LT(-1)
            if self._state.backtracking == 0:
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            # Standard ANTLR error recovery: report, resync, and return an
            # error node spanning the consumed tokens.
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
# $ANTLR end "case_block"
class case_clause_return(ParserRuleReturnScope):
    # Generated (ANTLR) return-value holder for the `case_clause` rule.
    def __init__(self):
        super(ExprParser.case_clause_return, self).__init__()
        self.tree = None
# $ANTLR start "case_clause"
# Expr.g:131:1: case_clause : ( case_test )+ ( stmt )* break_stmt -> ^( CASE ( case_test )+ ( stmt )* break_stmt ) ;
def case_clause(self, ):
    # ANTLR-generated rule method: matches one or more case_tests, any
    # statements, then a mandatory break_stmt, and rewrites the match into
    # a ^(CASE ...) subtree via the RewriteRule*Stream machinery.
    retval = self.case_clause_return()
    retval.start = self.input.LT(1)
    root_0 = None
    case_test82 = None
    stmt83 = None
    break_stmt84 = None
    stream_case_test = RewriteRuleSubtreeStream(self._adaptor, "rule case_test")
    stream_stmt = RewriteRuleSubtreeStream(self._adaptor, "rule stmt")
    stream_break_stmt = RewriteRuleSubtreeStream(self._adaptor, "rule break_stmt")
    try:
        try:
            # Expr.g:132:2: ( ( case_test )+ ( stmt )* break_stmt -> ^( CASE ( case_test )+ ( stmt )* break_stmt ) )
            # Expr.g:132:4: ( case_test )+ ( stmt )* break_stmt
            pass
            # Expr.g:132:4: ( case_test )+
            # One-or-more loop: token 106 ('case') predicts another case_test.
            cnt13 = 0
            while True: #loop13
                alt13 = 2
                LA13_0 = self.input.LA(1)
                if (LA13_0 == 106) :
                    alt13 = 1
                if alt13 == 1:
                    # Expr.g:132:4: case_test
                    pass
                    self._state.following.append(self.FOLLOW_case_test_in_case_clause750)
                    case_test82 = self.case_test()
                    self._state.following.pop()
                    if self._state.backtracking == 0:
                        stream_case_test.add(case_test82.tree)
                else:
                    if cnt13 >= 1:
                        break #loop13
                    if self._state.backtracking > 0:
                        raise BacktrackingFailed
                    eee = EarlyExitException(13, self.input)
                    raise eee
                cnt13 += 1
            # Expr.g:132:15: ( stmt )*
            # Zero-or-more loop; needs up to 3 tokens of lookahead to
            # disambiguate a stmt from the trailing break_stmt.
            while True: #loop14
                alt14 = 2
                LA14_0 = self.input.LA(1)
                if (LA14_0 == 105) :
                    LA14_1 = self.input.LA(2)
                    if (LA14_1 == 92) :
                        LA14_3 = self.input.LA(3)
                        if (LA14_3 == ID or LA14_3 == 80 or LA14_3 == 84 or LA14_3 == 92 or LA14_3 == 105 or (108 <= LA14_3 <= 109) or LA14_3 == 111 or (115 <= LA14_3 <= 119) or (122 <= LA14_3 <= 123) or LA14_3 == 125 or (128 <= LA14_3 <= 131)) :
                            alt14 = 1
                elif (LA14_0 == ID or LA14_0 == 80 or LA14_0 == 84 or LA14_0 == 92 or (108 <= LA14_0 <= 109) or LA14_0 == 111 or (115 <= LA14_0 <= 119) or (122 <= LA14_0 <= 123) or LA14_0 == 125 or (128 <= LA14_0 <= 131)) :
                    alt14 = 1
                if alt14 == 1:
                    # Expr.g:132:15: stmt
                    pass
                    self._state.following.append(self.FOLLOW_stmt_in_case_clause753)
                    stmt83 = self.stmt()
                    self._state.following.pop()
                    if self._state.backtracking == 0:
                        stream_stmt.add(stmt83.tree)
                else:
                    break #loop14
            self._state.following.append(self.FOLLOW_break_stmt_in_case_clause756)
            break_stmt84 = self.break_stmt()
            self._state.following.pop()
            if self._state.backtracking == 0:
                stream_break_stmt.add(break_stmt84.tree)
            # AST Rewrite
            # elements: stmt, case_test, break_stmt
            # token labels:
            # rule labels: retval
            # token list labels:
            # rule list labels:
            # wildcard labels:
            if self._state.backtracking == 0:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                # 133:3: -> ^( CASE ( case_test )+ ( stmt )* break_stmt )
                # Expr.g:133:6: ^( CASE ( case_test )+ ( stmt )* break_stmt )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(CASE, "CASE")
                    , root_1)
                # Expr.g:133:13: ( case_test )+
                # Rewrite-side "+": the stream must yield at least one tree.
                if not (stream_case_test.hasNext()):
                    raise RewriteEarlyExitException()
                while stream_case_test.hasNext():
                    self._adaptor.addChild(root_1, stream_case_test.nextTree())
                stream_case_test.reset()
                # Expr.g:133:24: ( stmt )*
                while stream_stmt.hasNext():
                    self._adaptor.addChild(root_1, stream_stmt.nextTree())
                stream_stmt.reset();
                self._adaptor.addChild(root_1, stream_break_stmt.nextTree())
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
            retval.stop = self.input.LT(-1)
            if self._state.backtracking == 0:
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException, re:
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
# $ANTLR end "case_clause"
class case_test_return(ParserRuleReturnScope):
    # Generated (ANTLR) return-value holder for the `case_test` rule.
    def __init__(self):
        super(ExprParser.case_test_return, self).__init__()
        self.tree = None
# $ANTLR start "case_test"
# Expr.g:135:1: case_test : 'case' expr ':' -> ^( CASE expr ) ;
def case_test(self, ):
retval = self.case_test_return()
retval.start = self.input.LT(1)
root_0 = None
string_literal85 = None
char_literal87 = None
expr86 = None
string_literal85_tree = None
char_literal87_tree = None
stream_91 = RewriteRuleTokenStream(self._adaptor, "token 91")
stream_106 = RewriteRuleTokenStream(self._adaptor, "token 106")
stream_expr = RewriteRuleSubtreeStream(self._adaptor, "rule expr")
try:
| |
import time
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import ElementNotVisibleException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
class UICheckException(Exception):
    """Raised when a UI check fails.

    Bug fix: the previous ``__init__`` did ``raise Exception(message)``
    instead of initialising the base class.  That meant constructing the
    exception raised a *plain* ``Exception`` before the instance ever
    existed, so ``raise UICheckException(msg)`` could never be caught as
    ``UICheckException``.  Initialising ``Exception`` normally preserves the
    message (``str(exc)``) and makes the type catchable, while remaining
    backward compatible with any ``except Exception`` handlers.
    """
    def __init__(self, message):
        super(UICheckException, self).__init__(message)
class SeleniumApi(object):
    # Thin convenience wrapper around a Selenium Firefox webdriver that adds
    # logging wait/verify/check helpers for UI tests.
    def __init__(self, driver):
        """
        :param driver: webdriver
        """
        # NOTE(review): `assert` is stripped under `python -O` and this also
        # hard-restricts the wrapper to Firefox — consider raising TypeError.
        assert isinstance(driver, webdriver.Firefox)
        self.driver = driver
    # Class-level tuning knobs, shared by all instances.
    # All timeout_* values are in seconds.
    retry = 3
    timeout_to_locate_element_in_seconds = 60
    timeout_to_determine_visibility_in_seconds = 120
    timeout_to_determine_if_clickable_in_seconds = 120
    timeout_to_wait_for_text_in_seconds = 240
    # Default implicit wait restored after temporary overrides (see
    # check_visibility_* methods).
    implicit_wait_default_in_seconds = 120
    timeout_to_check_for_visibility_in_seconds = 10
def set_implicit_wait(self, implicit_wait_time):
    """Apply *implicit_wait_time* (seconds) as the webdriver's implicit wait.

    :param implicit_wait_time: number of seconds the driver should poll
        for elements before giving up
    """
    wait_seconds = implicit_wait_time
    self.driver.implicitly_wait(wait_seconds)
def close_browser(self):
    """Terminate the Firefox session by quitting the wrapped webdriver."""
    self.driver.quit()
def get_url(self):
    """Return the browser's current URL as a plain string.

    Non-ASCII characters are dropped before conversion.
    """
    ascii_url = self.driver.current_url.encode('ascii', 'ignore')
    return str(ascii_url)
def zoom_out(self):
    """Zoom the page out two steps by sending Ctrl+'-' to the body element."""
    for _ in range(2):
        self.driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + "-")
def is_clickable_by_id(self, element_id):
    """Return True when the element located by *element_id* is enabled.

    :param element_id: id attribute of the element to inspect
    """
    return self.driver.find_element_by_id(element_id).is_enabled()
def wait_for_element_present_by_id(self, element_id):
    """
    Wait until an element with the given id is present in the DOM.

    Blocks for up to timeout_to_locate_element_in_seconds (WebDriverWait
    polls every 500 ms by default). Re-raises NoSuchElementException or
    TimeoutException after logging, so callers see the failure.

    :param element_id: id attribute of the element to wait for
    """
    print "Executing wait_for_element_present_by_id('{0}')".format(element_id)
    print "Looking for element id = '{0}' in the DOM".format(element_id)
    try:
        WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(EC.presence_of_element_located(
            (By.ID, element_id)))
        print "Found element by id = '{0}'".format(element_id)
    except NoSuchElementException, nse:
        print "Did not find element by id = '{0}'".format(element_id)
        raise
    except TimeoutException, t:
        print "ERROR: Timed out. Did not find element by id = '{0}'.".format(element_id)
        raise
def wait_for_element_present_by_css(self, css_selector):
    """
    Wait until an element matching the CSS selector is present in the DOM.

    Blocks for up to timeout_to_locate_element_in_seconds (WebDriverWait
    polls every 500 ms by default). Re-raises NoSuchElementException or
    TimeoutException after logging.

    :param css_selector: CSS selector of the element to wait for
    """
    print "Executing wait_for_element_present_by_css ('{0}')".format(css_selector)
    print "Looking for element by css = '{0}' in the DOM.".format(css_selector)
    try:
        WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, css_selector)))
        print "Found element by css = '{0}'".format(css_selector)
    except NoSuchElementException, nse:
        print "Did not find element by css = '{0}'".format(css_selector)
        raise
    except TimeoutException, t:
        print "ERROR: Timed out. Did not find element by css = '{0}'".format(css_selector)
        raise
def wait_for_element_present_by_link_text(self, link_text):
    """
    Wait until a link with the given visible text is present in the DOM.

    Blocks for up to timeout_to_locate_element_in_seconds (WebDriverWait
    polls every 500 ms by default). Re-raises NoSuchElementException or
    TimeoutException after logging.

    :param link_text: exact visible text of the link to wait for
    """
    print "Executing wait_for_element_present_by_link_text ('{0}')".format(link_text)
    print "Looking for element by link_text = '{0}' in the DOM.".format(link_text)
    try:
        WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(EC.presence_of_element_located(
            (By.LINK_TEXT, link_text)))
        print "Found element by link_text = '{0}'".format(link_text)
    except NoSuchElementException, nse:
        print "Did not find element by link_text = '{0}'".format(link_text)
        raise
    except TimeoutException, t:
        print "ERROR: Timed out. Did not find element by link_text = '{0}'".format(link_text)
        raise
def wait_for_element_present_by_name(self, name):
    """
    Wait until an element with the given name attribute is present in the DOM.

    Blocks for up to timeout_to_locate_element_in_seconds (WebDriverWait
    polls every 500 ms by default). Re-raises NoSuchElementException or
    TimeoutException after logging.

    :param name: name attribute of the element to wait for
    """
    print "Executing wait_for_element_present_by_name ('{0}')".format(name)
    print "Looking for element by name = '{0}' in the DOM.".format(name)
    try:
        WebDriverWait(self.driver, self.timeout_to_locate_element_in_seconds).until(EC.presence_of_element_located(
            (By.NAME, name)))
        print "Found element by name = '{0}'".format(name)
    except NoSuchElementException, nse:
        print "Did not find element by name = '{0}'".format(name)
        raise
    except TimeoutException, t:
        print "ERROR: Timed out. Did not find element by name = '{0}'".format(name)
        raise
def wait_for_visible_by_id(self, element_id, timeout_in_seconds=None):
    """
    Wait until the element located by id becomes visible; never raises.

    On timeout, additionally reports whether the element is at least
    present in the DOM. Returns False only when the element is absent
    from the DOM after the timeout; in every other case (visible, or
    present-but-invisible) the method falls through and returns None.

    :param element_id: id attribute of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing wait_for_visible_by_id('{0}'), timeout_in_seconds is set to {1}".format(
        element_id, timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(EC.visibility_of_element_located((By.ID, element_id)))
        print "Element by id = '{0}' was located in DOM and is visible.".format(element_id)
    except TimeoutException:
        # Timed out waiting for visibility: distinguish "present but hidden"
        # from "not in the DOM at all" for the log.
        print "ERROR: Timed out: element by id = '{0}' not visible.".format(element_id)
        print "Checking whether element by id = '{0}' present in the DOM.".format(element_id)
        try:
            self.driver.find_element_by_id(element_id)
            print "Element by id = '{0}' is present in the DOM but not visible.".format(element_id)
        except NoSuchElementException:
            print "ERROR: Element by id = '{0}' not found in the DOM.".format(element_id)
            return False
def wait_for_visible_by_css(self, css, timeout_in_seconds=None):
    """
    Wait until the element located by CSS selector becomes visible.

    On timeout, additionally reports whether the element is at least
    present in the DOM. Returns False only when the element is absent
    from the DOM after the timeout; otherwise falls through and returns
    None. Any other webdriver exception is logged and swallowed.

    :param css: CSS selector of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing wait_for_visible_by_css('{0}'), timeout_in_seconds is set to {1}".format(css,
                                                                                             timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, css)))
        print "Element by css = '{0}' was located in DOM and is visible.".format(css)
    except TimeoutException:
        print "ERROR: Timed out: element by css = '{0}' not visible.".format(css)
        print "Checking whether element by css = '{0}' present in the DOM.".format(css)
        try:
            self.driver.find_element_by_css_selector(css)
            print "Element by css = '{0}' is present in the DOM but not visible.".format(css)
        except NoSuchElementException:
            print "ERROR: Element by css = '{0}' not found in the DOM.".format(css)
            return False
    except Exception, e:
        # NOTE(review): broad best-effort swallow — unlike the by_id/by_xpath
        # variants this hides any unexpected webdriver error; confirm intended.
        print "ERROR: Unknown Exception e thrown by webdriver."
        print "Element was not found"
        pass
def wait_for_visible_by_xpath(self, xpath, timeout_in_seconds=None):
    """
    Wait until the element located by XPath becomes visible.

    On timeout, additionally reports whether the element is at least
    present in the DOM. Returns False only when the element is absent
    from the DOM after the timeout; otherwise falls through and returns
    None.

    :param xpath: XPath locator of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing wait_for_visible_by_xpath('{0}'), timeout_in_seconds is set to {1}".format(xpath,
                                                                                               timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(EC.visibility_of_element_located((By.XPATH, xpath)))
        print "Element by xpath = '{0}' was located in DOM and is visible.".format(xpath)
    except TimeoutException:
        print "ERROR: Timed out: element by xpath = '{0}' not visible.".format(xpath)
        print "Checking whether element by xpath = '{0}' present in the DOM.".format(xpath)
        try:
            self.driver.find_element_by_xpath(xpath)
            print "Element by xpath = '{0}' is present in the DOM but not visible.".format(xpath)
        except NoSuchElementException:
            print "ERROR: Element by xpath = '{0}' not found in the DOM.".format(xpath)
            return False
def verify_visible_by_id(self, element_id, timeout_in_seconds=None):
    """
    Wait until the element located by id becomes visible; raise on failure.

    On timeout: if the element is missing from the DOM the inner `raise`
    propagates NoSuchElementException; if it is present but invisible,
    the trailing `raise` re-raises the original TimeoutException.
    Returns None on success.

    :param element_id: id attribute of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing verify_visible_by_id('{0}'), timeout_in_seconds is set to {1}".format(
        element_id, timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(EC.visibility_of_element_located((By.ID, element_id)))
        print "Element by id = '{0}' was located in DOM and is visible.".format(element_id)
    except TimeoutException:
        print "ERROR: Timed out: element by id = '{0}' not visible.".format(element_id)
        print "Checking whether element by id = '{0}' present in the DOM.".format(element_id)
        try:
            self.driver.find_element_by_id(element_id)
            print "Element by id = '{0}' is present in the DOM but not visible.".format(element_id)
        except NoSuchElementException:
            print "ERROR: Element by id = '{0}' not found in the DOM.".format(element_id)
            raise
        # Element exists but never became visible: re-raise the TimeoutException.
        raise
def verify_visible_by_css(self, css, timeout_in_seconds=None):
    """
    Wait until the element located by CSS selector becomes visible; raise on failure.

    On timeout: if the element is missing from the DOM the inner `raise`
    propagates NoSuchElementException; if it is present but invisible,
    the trailing `raise` re-raises the original TimeoutException. Any
    other webdriver exception is logged and re-raised. Returns None on
    success.

    :param css: CSS selector of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing verify_visible_by_css('{0}'), timeout_in_seconds is set to {1}".format(css,
                                                                                           timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, css)))
        print "Element by css = '{0}' was located in DOM and is visible.".format(css)
    except TimeoutException:
        print "ERROR: Timed out: element by css = '{0}' not visible.".format(css)
        print "Checking whether element by css = '{0}' present in the DOM.".format(css)
        try:
            self.driver.find_element_by_css_selector(css)
            print "Element by css = '{0}' is present in the DOM but not visible.".format(css)
        except NoSuchElementException:
            print "ERROR: Element by css = '{0}' not found in the DOM.".format(css)
            raise
        # Element exists but never became visible: re-raise the TimeoutException.
        raise
    except Exception, e:
        print "ERROR: Unknown Exception e thrown by webdriver."
        print "Element was not found"
        raise
def verify_visible_by_xpath(self, xpath, timeout_in_seconds=None):
    """
    Wait until the element located by XPath becomes visible; raise on failure.

    On timeout: if the element is missing from the DOM the inner `raise`
    propagates NoSuchElementException; if it is present but invisible,
    the trailing `raise` re-raises the original TimeoutException.
    Returns None on success.

    :param xpath: XPath locator of the element
    :param timeout_in_seconds: max wait; defaults to
        timeout_to_determine_visibility_in_seconds
    """
    if timeout_in_seconds is None:
        timeout_in_seconds = self.timeout_to_determine_visibility_in_seconds
    print "Executing verify_visible_by_xpath('{0}'), timeout_in_seconds is set to {1}".format(xpath,
                                                                                             timeout_in_seconds)
    try:
        WebDriverWait(self.driver, timeout_in_seconds).until(EC.visibility_of_element_located((By.XPATH, xpath)))
        print "Element by xpath = '{0}' was located in DOM and is visible.".format(xpath)
    except TimeoutException:
        print "ERROR: Timed out: element by xpath = '{0}' not visible.".format(xpath)
        print "Checking whether element by xpath = '{0}' present in the DOM.".format(xpath)
        try:
            self.driver.find_element_by_xpath(xpath)
            print "Element by xpath = '{0}' is present in the DOM but not visible.".format(xpath)
        except NoSuchElementException:
            print "ERROR: Element by xpath = '{0}' not found in the DOM.".format(xpath)
            raise
        # Element exists but never became visible: re-raise the TimeoutException.
        raise
def check_visibility_by_id(self, element_id):
"""
Checks if the element is visible.
:param element_id:
"""
print "Executing check_visibility_by_id('{0}')".format(element_id)
try:
self.set_implicit_wait(1)
WebDriverWait(self.driver, self.timeout_to_check_for_visibility_in_seconds).until(
EC.visibility_of_element_located((By.ID, element_id)))
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
print "Element by id = '{0}' was located in DOM and is visible.".format(element_id)
return True
except TimeoutException:
print "Element by id = '{0}' not visible.".format(element_id)
return False
def check_visibility_by_css(self, css):
"""
Checks if the element is visible.
:param css:
"""
print "Executing check_visibility_by_css('{0}')".format(css)
try:
self.set_implicit_wait(0)
WebDriverWait(self.driver, self.timeout_to_check_for_visibility_in_seconds).until(
EC.visibility_of_element_located((By.CSS_SELECTOR, css)))
self.set_implicit_wait(self.implicit_wait_default_in_seconds)
print "Element by css = '{0}' was located in DOM and is visible.".format(css)
return True
except TimeoutException:
print "Element by css = '{0}' not visible.".format(css)
| |
self.directed else self.alpha[link[1]]
if not self.poisson_me:
if self.hawkes_me:
lambda_ij[link] += 0 if not after_first_event else np.sum(scaled_exponential(t-self.node_ts[link[0]][:(max_ind+1)], self.mu[link[0]],self.phi[link[0]]))
lambda_ij[link] += 0 if not after_first_event_prime else np.sum(scaled_exponential(t-(self.node_ts_prime[link[1]][:(max_ind_prime+1)] if self.directed else self.node_ts[link[1]][:(max_ind_prime+1)]),
self.mu_prime[link[1]] if self.directed else self.mu[link[1]], self.phi_prime[link[1]] if self.directed else self.phi[link[1]]))
else:
lambda_ij[link] += 0 if not after_first_event else scaled_exponential(t-self.node_ts[link[0]][max_ind], self.mu[link[0]],self.phi[link[0]])
lambda_ij[link] += 0 if not after_first_event_prime else scaled_exponential(t-(self.node_ts_prime[link[1]][max_ind_prime] if self.directed else self.node_ts[link[1]][max_ind_prime]),
self.mu_prime[link[1]] if self.directed else self.mu[link[1]], self.phi_prime[link[1]] if self.directed else self.phi[link[1]])
## Add intensities for interactions (when present)
if self.interactions:
lambda_ij[link] += np.sum(self.gamma[link[0]] * self.gamma_prime[link[1]])
if not self.poisson_int:
after_first_edge_event = np.any(np.array(self.A[link]) <= t) if right_limit else np.any(np.array(self.A[link]) < t)
if after_first_edge_event:
max_edge_ind = np.max(np.where(np.array(self.A[link]) <= t)) if right_limit else np.max(np.where(np.array(self.A[link]) < t))
if self.hawkes_int:
if not after_first_edge_event:
lambda_ij[link] += 0
else:
if len(np.array(self.A[link])[:(max_edge_ind+1)]) <= 1:
lambda_ij[link] += np.sum(scaled_exponential_prod(t-np.array(self.A[link])[:(max_edge_ind+1)],
self.nu[link[0]], self.nu_prime[link[1]] if self.directed else self.nu[link[1]], self.theta[link[0]], self.theta_prime[link[1]] if self.directed else self.theta[link[1]]))
else:
lambda_ij[link] += np.sum(scaled_exponential_prod_vec(t-np.array(self.A[link])[:(max_edge_ind+1)],
self.nu[link[0]], self.nu_prime[link[1]] if self.directed else self.nu[link[1]], self.theta[link[0]], self.theta_prime[link[1]] if self.directed else self.theta[link[1]]))
else:
lambda_ij[link] += 0 if not after_first_edge_event else np.sum(scaled_exponential_prod(t-self.A[link][max_edge_ind],
self.nu[link[0]], self.nu_prime[link[1]] if self.directed else self.nu[link[1]], self.theta[link[0]], self.theta_prime[link[1]] if self.directed else self.theta[link[1]]))
## Sum to obtain overall intensity
lambda_tot = sum(lambda_ij.values())
## Return the outputs
return lambda_ij, lambda_tot
## Simulation of the process
def simulate(self, T=None, m=None, copy_dict=False, verbose=False):
    """
    Simulate arrival times on every link up to time T (or at most m events).

    Thinning-style scheme: the aggregate intensity at the current time
    (right limit) proposes the next inter-arrival; the proposed time is then
    assigned to a link with probability proportional to the per-link
    intensities re-evaluated at that time (left limit), with a 'Reject'
    pseudo-link absorbing the leftover mass when the intensity has dropped.

    Side effects: clears and refills self.A, self.node_ts and
    self.node_ts_prime; stores the last proposal in self.t_star/self.probs.

    :param T: time horizon; defaults to self.T
    :param m: maximum number of accepted events (default 1e9, i.e. unbounded)
    :param copy_dict: when True, deep-copy the pre-simulation adjacency dict
        into self.A_initial before clearing it
    :param verbose: print a progress line per accepted event
    """
    if T is None:
        T = self.T
    if m is None:
        m = 1e9
    ## Initialise adjacency matrix
    if copy_dict:
        self.A_initial = copy.deepcopy(self.A)
    ## Dynamic adjacency matrix must be empty
    for link in self.A:
        self.A[link] = []
        self.node_ts[link[0]] = np.array([])
        self.node_ts_prime[link[1]] = np.array([])
    ## Initialise arrival times
    t_star = 0
    n_events = 0
    while t_star < T:
        ## If the graph is discrete, include the current arrival time
        _, lambda_star = self.intensity(t_star, right_limit=True)
        ## Propose new arrival time
        # Exponential inter-arrival with rate lambda_star; discrete-time
        # processes round the increment down to an integer step.
        if self.discrete:
            t_star += math.floor(-math.log(np.random.uniform(size=1)) / lambda_star)
        else:
            t_star -= math.log(np.random.uniform(size=1)) / lambda_star
        if t_star > T or n_events >= m:
            if verbose:
                print("")
            break
        ## Calculate intensities for each edge
        lambda_ij, _ = self.intensity(t_star, right_limit=False)
        ## Calculate probabilities
        links = list(lambda_ij.keys())
        probs = [x / lambda_star for x in list(lambda_ij.values())]
        self.probs = probs
        self.t_star = t_star
        sum_probs = np.sum(probs)
        # If total intensity decreased since the proposal, the residual
        # probability mass goes to a rejection outcome; otherwise renormalise.
        if sum_probs < 1:
            probs += [1-sum_probs]
            links += ['Reject']
        else:
            probs = [x / sum_probs for x in probs]
        ## Assign the arrival time to one of the links
        assignment = np.random.choice(len(links),p=probs)
        if links[assignment] != 'Reject':
            n_events += 1
            self.A[links[assignment]] = np.append(self.A[links[assignment]],t_star)
            self.node_ts[links[assignment][0]] = np.append(self.node_ts[links[assignment][0]], t_star)
            self.node_ts_prime[links[assignment][1]] = np.append(self.node_ts_prime[links[assignment][1]], t_star)
            if verbose:
                print("\r+++ Number of simulated events +++ {} - Time: {:0.3f}".format(n_events,t_star), end="")
    if verbose:
        print("")
## Calculate required quantities for evaluation of the likelihood
def likelihood_calculation_setup(self, verbose=False):
    """
    Precompute per-link quantities used by the likelihood evaluation.

    Populates:
    - self.A_diff: inter-arrival times per link (discrete-aware),
    - self.equal_start (discrete only): count of arrivals tied with the first,
    - with Hawkes main effects: self.psi_times / self.psi_prime_times, the
      arrival-time differences feeding the recursive psi coefficients,
    - otherwise (the "r=1" process): self.A_bar / self.A_bar_prime.

    :param verbose: print progress percentages per processed link
    """
    ## Calculate the inter-arrival times on each edge
    if verbose:
        prop = 0
    self.A_diff = {}
    if self.discrete:
        self.equal_start = Counter()
    for link in self.A:
        ## if self.nij[link] > 1:
        self.A_diff[link] = discrete_process_difference(self.A[link]) if self.discrete else np.diff(self.A[link])
        if self.discrete:
            self.equal_start[link] = np.sum(self.A[link] == self.A[link][0])
        if verbose:
            prop += self.nij[link]
            print("\r+++ Percentage of processed links (time differences) +++ {:0.2f}%".format(prop / self.m * 100), end="")
    if verbose:
        prop = 0
        print("")
    if not self.poisson_me:
        if self.main_effects:
            ## Set up the calculations for the psi coefficients (only for Hawkes process)
            if self.hawkes_me:
                self.psi_times = {}
                self.psi_prime_times = {}
                for link in self.A:
                    ## Initialise the dictionary of dictionaries for the time differences used to calculate each psi
                    self.psi_times[link] = {}
                    self.psi_prime_times[link] = {}
                    ## Obtain ell_k indices (connections on the given edge on the node time series)
                    ell_k = np.where(self.node_ts_edges[link[0]] == link[1])[0]
                    ts = self.node_ts[link[0]]
                    # Discrete case: rewind each index past ties so that
                    # simultaneous arrivals are excluded from the window.
                    if self.discrete:
                        for k in range(len(ell_k)):
                            arrival_time = ts[ell_k[k]]
                            k_mod = ell_k[k]-1
                            while ts[k_mod] == arrival_time:
                                k_mod -= 1
                                if k_mod < 0:
                                    break
                            ell_k[k] = k_mod+1
                    ## if len(ell_k) != self.nij[link]:
                    ## raise ValueError('len(ell_k) and nij must be equivalent.')
                    ## Sequentially obtain the arrival times needed to calculate psi(k)
                    for k in range(len(ell_k)):
                        self.psi_times[link][k] = self.A[link][k] - ts[(ell_k[k-1]+1 if k != 0 else 0):ell_k[k]]
                    ## Repeat for psi_prime
                    ell_k_prime = np.where((self.node_ts_prime_edges[link[1]] if self.directed else self.node_ts_edges[link[1]]) == link[0])[0]
                    ts_prime = (self.node_ts_prime if self.directed else self.node_ts)[link[1]]
                    if self.discrete:
                        for k in range(len(ell_k_prime)):
                            arrival_time = ts_prime[ell_k_prime[k]]
                            k_mod = ell_k_prime[k]-1
                            while ts_prime[k_mod] == arrival_time:
                                k_mod -= 1
                                if k_mod < 0:
                                    break
                            ell_k_prime[k] = k_mod+1
                    ## if len(ell_k_prime) != self.nij[link]:
                    ## raise ValueError('len(ell_k_prime) and nij must be equivalent.')
                    for k in range(len(ell_k_prime)):
                        self.psi_prime_times[link][k] = self.A[link][k] - ts_prime[(ell_k_prime[k-1]+1 if k != 0 else 0):ell_k_prime[k]]
                    if verbose:
                        prop += self.nij[link]
                        print("\r+++ Percentage of processed links (separation of arrival times) +++ {:0.2f}%".format(prop / self.m * 100), end="")
            else:
                ## Set up the calculations for the t bar arrival times (only for r=1 process)
                self.A_bar = {}
                self.A_bar_prime = {}
                for link in self.A:
                    ## Initialise the t bars
                    self.A_bar[link] = []
                    self.A_bar_prime[link] = []
                    ## Obtain ell_k indices (connections on the given edge on the node time series)
                    ell_k = np.where(self.node_ts_edges[link[0]] == link[1])[0] - 1
                    ts = self.node_ts[link[0]]
                    # Discrete case: rewind past ties (same idea as above,
                    # but indices point at the PREVIOUS arrival).
                    if self.discrete:
                        for k in range(len(ell_k)):
                            arrival_time = ts[ell_k[k]+1]
                            k_mod = ell_k[k]
                            while ts[k_mod] == arrival_time:
                                k_mod -= 1
                                if k_mod < 0:
                                    break
                            ell_k[k] = k_mod
                    # Entries with no previous arrival (index < 0) are dropped.
                    indices = (ell_k >= 0)
                    self.A_bar[link] = self.A[link][indices] - ts[ell_k[indices]]
                    ## Repeat for psi_prime
                    ell_k_prime = np.where((self.node_ts_prime_edges[link[1]] if self.directed else self.node_ts_edges[link[1]]) == link[0])[0] - 1
                    ts_prime = (self.node_ts_prime if self.directed else self.node_ts)[link[1]]
                    if self.discrete:
                        for k in range(len(ell_k_prime)):
                            arrival_time = ts_prime[ell_k_prime[k]+1]
                            k_mod = ell_k_prime[k]
                            while ts_prime[k_mod] == arrival_time:
                                k_mod -= 1
                                if k_mod < 0:
                                    break
                            ell_k_prime[k] = k_mod
                    indices_prime = (ell_k_prime >= 0)
                    self.A_bar_prime[link] = self.A[link][indices_prime] - ts_prime[ell_k_prime[indices_prime]]
                    if verbose:
                        prop += self.nij[link]
                        print("\r+++ Percentage of processed links (calculation of t_bars) +++ {:0.2f}%".format(prop / self.m * 100), end="")
    if verbose:
        print("")
## Recursive calculations of psi
def psi_calculation(self, calculate_derivative=False, verbose=False):
    """
    Recursively compute the psi coefficients (and optionally their
    derivatives) used by the likelihood when Hawkes kernels are active.

    Populates self.psi / self.psi_prime (main effects) and self.psi_tilde
    (interactions), each a dict keyed by link and then by event index k;
    derivative dicts are filled only when calculate_derivative is True.
    Requires likelihood_calculation_setup to have filled self.psi_times,
    self.psi_prime_times and self.A_diff first.

    :param calculate_derivative: also accumulate derivative recursions
    :param verbose: print progress percentages per processed link
    """
    ## Psi coefficients are only required when the Hawkes process is used
    # if (self.main_effects and (self.poisson_me or not self.hawkes_me)) or (self.interactions and (self.poisson_int or not self.hawkes_int)):
    # warnings.warn("psi_calculation should be used only if Hawkes processes for main effects or interactions are used.", Warning)
    if not self.poisson_me or not self.poisson_int:
        ## Main effects with Hawkes
        if self.main_effects and not self.poisson_me and self.hawkes_me:
            ## Initialise the dictionaries
            self.psi = {}
            self.psi_prime = {}
            ## Initialise the dictionaries for the derivatives (if required)
            if calculate_derivative:
                self.psi_derivative = {}
                self.psi_prime_derivative = {}
            if verbose:
                prop = 0
            for link in self.A:
                ## Initialise the dictionary of dictionaries for each link (key 1: link, key 2: k)
                self.psi[link] = {}
                self.psi_prime[link] = {}
                if calculate_derivative:
                    self.psi_derivative[link] = {}
                    self.psi_prime_derivative[link] = {}
                ## Obtain the parameters
                mu = self.mu[link[0]]
                mu_prime = self.mu_prime[link[1]] if self.directed else self.mu[link[1]]
                phi = self.phi[link[0]]
                phi_prime = self.phi_prime[link[1]] if self.directed else self.phi[link[1]]
                ## Obtain the times from psi_times
                times = self.psi_times[link][0]
                times_prime = self.psi_prime_times[link][0]
                ## Calculate the required psi (and derivatives if required)
                # Base case k=0: direct exponential sums over the time windows.
                self.psi[link][0] = np.sum(np.exp(-(mu+phi) * times))
                self.psi_prime[link][0] = np.sum(np.exp(-(mu_prime+phi_prime) * times_prime))
                if calculate_derivative:
                    self.psi_derivative[link][0] = - np.sum(times * np.exp(-(mu+phi) * times))
                    self.psi_prime_derivative[link][0] = - np.sum(times_prime * np.exp(-(mu_prime+phi_prime) * times_prime))
                ## Calculate the remaining values of psi sequentially
                if self.nij[link] > 1:
                    # NOTE(review): discrete uses np.diff here while the
                    # setup stored discrete_process_difference in A_diff for
                    # the discrete case — confirm this asymmetry is intended.
                    tdiff = np.diff(self.A[link]) if self.discrete else self.A_diff[link]
                    for k in range(1,self.nij[link]):
                        times = self.psi_times[link][k]
                        times_prime = self.psi_prime_times[link][k]
                        t_diff = tdiff[k-1]
                        # Recursion: decay the previous value and add the new window's contribution.
                        self.psi[link][k] = np.exp(-(mu+phi) * t_diff) * (1 + self.psi[link][k-1]) + (np.sum(np.exp(-(mu+phi) * times)) if len(times) > 0 else 0)
                        self.psi_prime[link][k] = np.exp(-(mu_prime+phi_prime) * t_diff) * (1 + self.psi_prime[link][k-1]) + (np.sum(np.exp(-(mu_prime+phi_prime) * times_prime)) if len(times_prime) > 0 else 0)
                        if calculate_derivative:
                            self.psi_derivative[link][k] = np.exp(-(mu+phi) * t_diff) * (self.psi_derivative[link][k-1] - t_diff * (1 + self.psi[link][k-1])) - (np.sum(times * np.exp(-(mu+phi) * times)) if len(times) > 0 else 0)
                            self.psi_prime_derivative[link][k] = np.exp(-(mu_prime+phi_prime) * t_diff) * (self.psi_prime_derivative[link][k-1] - t_diff * (1 + self.psi_prime[link][k-1])) - \
                                (np.sum(times_prime * np.exp(-(mu_prime+phi_prime) * times_prime)) if len(times_prime) > 0 else 0)
                    ## Adjust for repeated values (ties)
                    if self.discrete:
                        for k in np.where(tdiff == 0)[0]:
                            self.psi[link][k+1] = self.psi[link][k]
                            self.psi_prime[link][k+1] = self.psi_prime[link][k]
                            if calculate_derivative:
                                self.psi_derivative[link][k+1] = self.psi_derivative[link][k]
                                self.psi_prime_derivative[link][k+1] = self.psi_prime_derivative[link][k]
                if verbose:
                    prop += self.nij[link]
                    print("\r+++ Percentage of processed links (main effects) +++ {:0.2f}%".format(prop / self.m * 100), end="")
            if verbose:
                print("")
        ## Interactions with Hawkes
        if self.interactions and not self.poisson_int and self.hawkes_int:
            self.psi_tilde = {}
            if calculate_derivative:
                self.psi_tilde_derivative = {}
                self.psi_tilde_derivative_prime = {}
            if verbose:
                prop = 0
            for link in self.A:
                ## For k=0, psi_tilde is 0
                self.psi_tilde[link] = {}
                self.psi_tilde[link][0] = np.zeros(self.D) if self.D > 1 else 0.0
                if calculate_derivative:
                    self.psi_tilde_derivative[link] = {}
                    self.psi_tilde_derivative_prime[link] = {}
                    self.psi_tilde_derivative[link][0] = np.zeros(self.D) if self.D > 1 else 0.0
                    self.psi_tilde_derivative_prime[link][0] = np.zeros(self.D) if self.D > 1 else 0.0
                ## Obtain the parameters
                nu = self.nu[link[0]]
                nu_prime = self.nu_prime[link[1]] if self.directed else self.nu[link[1]]
                theta = self.theta[link[0]]
                theta_prime = self.theta_prime[link[1]] if self.directed else self.theta[link[1]]
                ## Calculate the remaining values of psi sequentially
                if self.nij[link] > 1:
                    tdiff = np.diff(self.A[link]) if self.discrete else self.A_diff[link]
                    for k in range(1,self.nij[link]):
                        t_diff = tdiff[k-1]
                        # Recursion with product decay rate (nu+theta)*(nu_prime+theta_prime).
                        self.psi_tilde[link][k] = np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff) * (1 + self.psi_tilde[link][k-1])
                        if calculate_derivative:
                            self.psi_tilde_derivative[link][k] = np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff) * (self.psi_tilde_derivative[link][k-1] - (nu_prime+theta_prime) * t_diff * (1 + self.psi_tilde[link][k-1]))
                            self.psi_tilde_derivative_prime[link][k] = np.exp(-(nu+theta) * (nu_prime+theta_prime) * t_diff) * (self.psi_tilde_derivative_prime[link][k-1] - (nu+theta) * t_diff * (1 + self.psi_tilde[link][k-1]))
                    ## Adjust for repeated values (ties)
                    if self.discrete:
                        for k in np.where(tdiff == 0)[0]:
                            self.psi_tilde[link][k+1] = self.psi_tilde[link][k]
                            if calculate_derivative:
                                self.psi_tilde_derivative[link][k+1] = self.psi_tilde_derivative[link][k]
                                self.psi_tilde_derivative_prime[link][k+1] = self.psi_tilde_derivative_prime[link][k]
                if verbose:
                    prop += self.nij[link]
                    print("\r+++ Percentage of processed links (interactions) +++ {:0.2f}%".format(prop / self.m * 100), end="")
            if verbose:
                print("")
## Recursive calculations of zeta (corresponding to lambda_ij for the observed links)
def zeta_calculation(self, verbose=False):
## Initialise the dictionary of dictionaries (key 1: link, key 2: k)
self.zeta = {}
if verbose:
prop = 0
for link in self.A:
self.zeta[link] = {}
if self.main_effects:
alpha = self.alpha[link[0]]
beta = self.beta[link[1]] if self.directed else self.alpha[link[1]]
if not self.poisson_me:
mu = self.mu[link[0]]
mu_prime = self.mu_prime[link[1]] if self.directed else self.mu[link[1]]
## Parameters needed if Hawkes processes are not used (otherwise psi should be available)
if not self.hawkes_me:
phi = self.phi[link[0]]
phi_prime | |
a max of 2^29 pixels, or 23170 on a side
# recompute max_dpi
max_dpi = int(23000 / max(fig_height, fig_width))
h, w = im.shape[:2]
# try to set dpi to native resolution of imagery
desired_dpi = max(default_dpi, 1.0 * h / fig_height)
#desired_dpi = max(default_dpi, int( np.max(im.shape) / max(fig_height, fig_width) ) )
dpi = int(np.min([max_dpi, desired_dpi ]))
# save and show the figure as specified
fig, ax = save_and_show(fig, ax, save, show, close, filename,
file_format, dpi, axis_off)
return fig, ax
###############################################################################
def plot_graph_route_pix(G, route, im=None, bbox=None, fig_height=6, fig_width=None,
                         margin=0.02, bgcolor='w', axis_off=True, show=True,
                         save=False, close=True, file_format='png', filename='temp',
                         dpi=300, annotate=False, node_color='#999999',
                         node_size=15, node_alpha=1, node_edgecolor='none',
                         node_zorder=1, edge_color='#999999', edge_linewidth=1,
                         edge_alpha=1,
                         edge_width_key='speed_mph',
                         edge_width_mult=1./25,
                         use_geom=True, origin_point=None,
                         destination_point=None, route_color='r', route_linewidth=4,
                         route_alpha=0.5, orig_dest_node_alpha=0.5,
                         orig_dest_node_size=100,
                         orig_dest_node_color='r'):
    """
    Plot a route along a networkx spatial graph, in pixel coordinates.

    Parameters
    ----------
    G : networkx multidigraph
        graph whose nodes carry 'x_pix'/'y_pix' attributes
    route : list
        the route as a list of nodes
    im : numpy array
        optional background image to draw the graph over (forwarded to
        plot_graph_pix)
    bbox : tuple
        bounding box as north,south,east,west - if None will calculate from
        spatial extents of data. if passing a bbox, you probably also want to
        pass margin=0 to constrain it.
    fig_height : int
        matplotlib figure height in inches
    fig_width : int
        matplotlib figure width in inches
    margin : float
        relative margin around the figure
    axis_off : bool
        if True turn off the matplotlib axis
    bgcolor : string
        the background color of the figure and axis
    show : bool
        if True, show the figure
    save : bool
        if True, save the figure as an image file to disk
    close : bool
        close the figure (only if show equals False) to prevent display
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    filename : string
        the name of the file if saving
    dpi : int
        the resolution of the image file if saving
    annotate : bool
        if True, annotate the nodes in the figure
    node_color : string
        the color of the nodes
    node_size : int
        the size of the nodes
    node_alpha : float
        the opacity of the nodes
    node_edgecolor : string
        the color of the node's marker's border
    node_zorder : int
        zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
        nodes beneath them or 3 to plot nodes atop them
    edge_color : string
        the color of the edges' lines
    edge_linewidth : float
        the width of the edges' lines
    edge_alpha : float
        the opacity of the edges' lines
    edge_width_key : string
        edge attribute whose value scales each plotted edge's line width
        (forwarded to plot_graph_pix)
    edge_width_mult : float
        multiplier applied to edge_width_key values when drawing edges
        (forwarded to plot_graph_pix)
    use_geom : bool
        if True, use the spatial geometry attribute of the edges to draw
        geographically accurate edges, rather than just lines straight from node
        to node
    origin_point : tuple
        optional point to plot instead of the origin node.
        NOTE(review): earlier docs said (lat, lon), but the code below uses
        origin_point[0] as the x coordinate, i.e. it expects (x_pix, y_pix)
        ordering -- confirm against callers before relying on this.
    destination_point : tuple
        optional point to plot instead of the destination node
        (same ordering caveat as origin_point)
    route_color : string
        the color of the route
    route_linewidth : int
        the width of the route line
    route_alpha : float
        the opacity of the route line
    orig_dest_node_alpha : float
        the opacity of the origin and destination nodes
    orig_dest_node_size : int
        the size of the origin and destination nodes
    orig_dest_node_color : string
        the color of the origin and destination nodes
        (can be a string or list with (origin_color, dest_color))

    Returns
    -------
    fig, ax : tuple
    """
    # plot the graph but not the route (show/save/close deferred to the end)
    fig, ax = plot_graph_pix(G, im=im, bbox=bbox, fig_height=fig_height, fig_width=fig_width,
                             margin=margin, axis_off=axis_off, bgcolor=bgcolor,
                             show=False, save=False, close=False, filename=filename,
                             default_dpi=dpi, annotate=annotate, node_color=node_color,
                             node_size=node_size, node_alpha=node_alpha,
                             node_edgecolor=node_edgecolor, node_zorder=node_zorder,
                             edge_color=edge_color, edge_linewidth=edge_linewidth,
                             edge_alpha=edge_alpha, edge_width_key=edge_width_key,
                             edge_width_mult=edge_width_mult,
                             use_geom=use_geom)

    # the origin and destination nodes are the first and last nodes in the route
    origin_node = route[0]
    destination_node = route[-1]

    if origin_point is None or destination_point is None:
        # if caller didn't pass points, use the first and last node in route as
        # origin/destination
        origin_destination_ys = (G.nodes[origin_node]['y_pix'],
                                 G.nodes[destination_node]['y_pix'])
        origin_destination_xs = (G.nodes[origin_node]['x_pix'],
                                 G.nodes[destination_node]['x_pix'])
    else:
        # otherwise, use the passed points as origin/destination
        origin_destination_xs = (origin_point[0], destination_point[0])
        origin_destination_ys = (origin_point[1], destination_point[1])

    # scatter the origin and destination points, above the route (zorder=4)
    ax.scatter(origin_destination_xs, origin_destination_ys,
               s=orig_dest_node_size,
               c=orig_dest_node_color,
               alpha=orig_dest_node_alpha, edgecolor=node_edgecolor, zorder=4)

    # plot the route lines
    edge_nodes = list(zip(route[:-1], route[1:]))
    lines = []
    for u, v in edge_nodes:
        # if there are parallel edges, select the shortest in length
        data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])

        # if it has a geometry attribute (ie, a list of line segments)
        if 'geometry_pix' in data and use_geom:
            # add them to the list of lines to plot
            xs, ys = data['geometry_pix'].xy
            lines.append(list(zip(xs, ys)))
        else:
            # if it doesn't have a geometry attribute, the edge is a straight
            # line from node to node
            x1 = G.nodes[u]['x_pix']
            y1 = G.nodes[u]['y_pix']
            x2 = G.nodes[v]['x_pix']
            y2 = G.nodes[v]['y_pix']
            line = [(x1, y1), (x2, y2)]
            lines.append(line)

    # add the lines to the axis as a linecollection
    lc = LineCollection(lines, colors=route_color, linewidths=route_linewidth, alpha=route_alpha, zorder=3)
    ax.add_collection(lc)

    # save and show the figure as specified
    fig, ax = save_and_show(fig, ax, save, show, close, filename, file_format, dpi, axis_off)
    return fig, ax
###############################################################################
def save_and_show(fig, ax, save, show, close, filename, file_format, dpi,
                  axis_off, tight_layout=False):
    """
    Save a figure to disk and show it, as specified.
    Assume filename holds the entire path to the file.

    Parameters
    ----------
    fig : figure
    ax : axis
    save : bool
        whether to save the figure to disk or not
    show : bool
        whether to display the figure or not
    close : bool
        close the figure (only if show equals False) to prevent display
    filename : string
        the full path of the file to save
    file_format : string
        the format of the file to save (e.g., 'jpg', 'png', 'svg')
    dpi : int
        the resolution of the image file if saving
    axis_off : bool
        if True matplotlib axis was turned off by plot_graph so constrain the
        saved figure's extent to the interior of the axis
    tight_layout : bool
        if True, strip axis decorations and padding before saving
        (raster formats only)

    Returns
    -------
    fig, ax : tuple
    """
    # save the figure if specified
    if save:
        start_time = time.time()

        # create the save folder if it doesn't already exist.
        # BUG FIX: os.path.dirname() returns '' for a bare filename and
        # os.makedirs('') raises FileNotFoundError; exist_ok also avoids a
        # race between the exists() check and the mkdir.
        dirname = os.path.dirname(filename)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        path_filename = filename

        if file_format == 'svg':
            # if the file_format is svg, prep the fig/ax a bit for saving
            ax.axis('off')
            ax.set_position([0, 0, 1, 1])
            ax.patch.set_alpha(0.)
            fig.patch.set_alpha(0.)
            fig.savefig(path_filename, bbox_inches=0, format=file_format, facecolor=fig.get_facecolor(), transparent=True)
        else:
            if axis_off:
                # if axis is turned off, constrain the saved figure's extent to
                # the interior of the axis
                extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
            else:
                extent = 'tight'
            if tight_layout:
                # remove all axis chrome and margins so the image fills the file
                fig.gca().set_axis_off()
                fig.subplots_adjust(top=1, bottom=0, right=1, left=0,
                                    hspace=0, wspace=0)
                plt.margins(0, 0)
                fig.savefig(path_filename, dpi=dpi, bbox_inches=extent,
                            format=file_format, facecolor=fig.get_facecolor(),
                            transparent=True, pad_inches=0)
            else:
                fig.savefig(path_filename, dpi=dpi, bbox_inches=extent,
                            format=file_format, facecolor=fig.get_facecolor(),
                            transparent=True)
        print('Saved the figure to disk in {:,.2f} seconds'.format(time.time() - start_time))

    # show the figure if specified
    if show:
        start_time = time.time()
        plt.show()
        print('Showed the plot in {:,.2f} seconds'.format(time.time() - start_time))
    # if show=False, close the figure if close=True to prevent display
    elif close:
        plt.close()

    return fig, ax
###############################################################################
def main():
default_crs = {'init':'epsg:4326'}
# Vegas0 settings
local = False
fig_height=12
fig_width=12
node_color='#66ccff' # light blue
#node_color='#8b3626' # tomato4
node_size=0.4
node_alpha=0.6
#edge_color='#999999' # gray
#edge_color='#ee5c42' # tomato2
edge_color='#bfefff' # lightblue1
edge_linewidth=0.2
edge_alpha=0.5
orig_dest_node_size=4.5*node_size
route_color='r'
orig_dest_node_color='r'
route_linewidth=4*edge_linewidth
#dpi=1000 # set dpi latet using image size, to approximate native resolution
# local
if local:
pass
# deployed
else:
from config import Config
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
args = parser.parse_args()
with open(args.config_path, 'r') as f:
cfg = json.load(f)
config = Config(**cfg)
# outut files
res_root_dir = os.path.join(config.path_results_root, config.test_results_dir)
path_images_8bit = os.path.join(config.path_data_root, config.test_data_refined_dir)
graph_dir = os.path.join(res_root_dir, config.graph_dir)
out_dir = graph_dir.strip() + '_plots'
#res_root_dir = config.results_dir #os.path.dirname(im_dir)
#path_images_8bit = config.path_images_8bit
#graph_dir = os.path.join(res_root_dir, 'graphs')
# iterate through images and graphs, plot routes
im_list = sorted([z for z in os.listdir(path_images_8bit) if z.endswith('.tif')])
if shuffle:
random.shuffle(im_list)
for i,im_root in enumerate(im_list):#enumerate(os.listdir(path_images_8bit)):
if not im_root.endswith('.tif'):
continue
if i >= max_plots:
break
im_root_no_ext = im_root.split('.tif')[0]
im_file = os.path.join(path_images_8bit, im_root)
graph_pkl = os.path.join(graph_dir, im_root_no_ext + '.gpickle')
print ("\n\n", | |
<filename>sprcom.py
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import theano.tensor as tt
from scipy.linalg import eigvalsh
from scipy.sparse import csr_matrix
from theano.sparse import as_sparse_variable
from theano.sparse import dot as t_sparse_dot
from theano import shared
from seaborn import heatmap
def expand_packed(n, packed):
    """
    Unpack the entries of a lower-triangular matrix from one dimension to two.

    Identical to PyMC3's expand_packed except that it operates on numpy
    arrays rather than theano tensors.  Entries are consumed in row-major
    lower-triangular order: (0,0), (1,0), (1,1), (2,0), ...

    Parameters
    ----------
    n : int
        The dimension of the matrix to be unpacked
    packed : 1D array-like
        The array of matrix elements to be unpacked

    Returns
    -------
    out : 2D Numpy array
        Square, lower triangular matrix
    """
    tri = np.zeros([n, n])
    rows, cols = np.tril_indices(n)
    for pos, (r, c) in enumerate(zip(rows, cols)):
        tri[r, c] = packed[pos]
    return tri
def get_neighbors(state):
    """
    For a 2D lattice, create a numpy array in which each entry is a list of
    tuples indicating the indices of its von Neumann (up/left/down/right)
    neighbors.

    Parameters
    ----------
    state : 2D Numpy array
        Array with the shape that should be copied for the resulting
        2D lattice

    Returns
    -------
    neighbors : 2D Numpy array with list entries
        Array of the same shape as `state`; entry [i, j] lists the
        (row, col) indices of that site's neighbors
    """
    y, x = state.shape
    # BUG FIX: was np.empty([x, y]), which transposes the dimensions and
    # breaks (IndexError or mis-indexing) for any non-square lattice.
    # Precalculate the list of neighbors for each cell so this doesn't need
    # to be recalculated on the fly.
    neighbors = np.empty([y, x], dtype=object)
    for i in range(y):
        for j in range(x):
            neighbors[i, j] = []
            # `x is not 0` compares identity, not value -- use != instead
            if i != 0:
                # add the neighbor above
                neighbors[i, j].append((i - 1, j))
            if j != 0:
                # add the neighbor to the left
                neighbors[i, j].append((i, j - 1))
            if i != y - 1:
                # add the neighbor below
                neighbors[i, j].append((i + 1, j))
            if j != x - 1:
                # add the neighbor to the right
                neighbors[i, j].append((i, j + 1))
    return neighbors
def adjacency_from_1d(neighbors_1d):
    """
    Convert a 1D list of neighbor-index lists into an adjacency matrix.

    The i-th inner list contains the indices of the i-th site's neighbors.

    Parameters
    ----------
    neighbors_1d : list of arrays
        Sequence of lists with entries indicating the column indices of
        neighbors for each row of the adjacency matrix

    Returns
    -------
    W : 2D Numpy array
        Binary adjacency matrix created according to the specification
        in neighbors_1d
    """
    size = len(neighbors_1d)
    W = np.zeros([size, size])
    for row, cols in enumerate(neighbors_1d):
        for col in cols:
            W[row, col] = 1
    return W
def coverage(samples, true, width=90):
    """
    Calculate the fraction of variables whose true values fall within the
    empirical credible interval of the samples.

    Parameters
    ----------
    samples : 2D Numpy array
        Array of samples across multiple variables
        with shape [n_samples, n_variables]
    true : 1D Numpy array
        Array of true values with shape [n_variables]
    width : integer
        Percentile from 0-100 specifying the width of
        the empirical credible interval to use.

    Returns
    -------
    fraction_covered : float
        The fraction of true variable values which
        fell within the sampled credible interval
    """
    tail = (100 - width) / 2
    lower, upper = np.percentile(samples, [tail, 100 - tail], axis=0)
    fraction_covered = np.mean((true > lower) & (true < upper))
    return fraction_covered
def sample_CAR(W, rho, tau):
    """
    Draw one realization of a conditional autoregression / Markov random
    field with correlation rho and precision tau.

    Parameters
    ----------
    W : 2D Numpy array
        Binary adjacency matrix with zeros on the diagonal
    rho : float
        Number between -1 and 1 specifying the spatial correlation
    tau : float
        Scale of the CAR realization in terms of the precision

    Returns
    -------
    sample : 1D Numpy array
        A random draw from the conditional autoregression /
        Markov random field defined by the input parameters
    """
    n = W.shape[0]
    # Precision matrix of the CAR model: tau * (D - rho * W)
    degree = np.diag(W.sum(axis=0))
    precision = tau * (degree - rho * W)
    # Sample via the Cholesky factor of the covariance (inverse precision);
    # exactly one call to np.random.randn(n), as in the original.
    chol = np.linalg.cholesky(np.linalg.inv(precision))
    noise = np.random.randn(n)
    return chol @ noise
def create_W_grid(width):
    """
    Create the adjacency matrix of a square 2D grid (4-neighborhood),
    defined by its width.

    This inlines the former get_neighbors/adjacency_from_1d round trip:
    node i sits at (row, col) = divmod(i, width) and is adjacent to the
    nodes directly above, left, below and right of it.

    Arguments
    ---------
    width : int
        row and column dimension of the desired square grid

    Returns
    -------
    W : 2D Numpy array
        Square binary adjacency matrix of shape [width**2, width**2]
    """
    n = width ** 2
    W = np.zeros([n, n])
    for node in range(n):
        row, col = divmod(node, width)
        if row > 0:
            W[node, node - width] = 1
        if col > 0:
            W[node, node - 1] = 1
        if row < width - 1:
            W[node, node + width] = 1
        if col < width - 1:
            W[node, node + 1] = 1
    return W
def diverse_community_matrix(S, C, multiplier=1, width=1):
    """
    Quick way to make a species/community matrix with diverse structure
    across communities. Each community's row looks like a normal PDF,
    offset (rolled) to make the communities dissimilar.

    Arguments
    ---------
    S : int
        Number of species
    C : int
        Number of communities, i.e. clusters of species
    multiplier : float
        Scale of the matrix entries
    width : float
        Spread of species across the community vector. Increase this
        to force each community to have more significant species.

    Returns
    -------
    sc : 2D Numpy array
        Matrix with shape [n_communities, n_species] (note: communities are
        rows -- the [S, C] working array is transposed before returning).
        Each community is a positive vector of length n_species; the width
        parameter adjusts the overlap between communities.
    """
    sc = np.zeros([S, C])
    x = np.linspace(-2, 2, S)
    y = np.exp(-(1. / width ** 2) * x ** 2)
    shift = int(S / C)
    for c in range(C):
        sc[:, c] = np.roll(y, shift=-shift * c, axis=0)
    sc = sc.T * multiplier
    # BUG FIX: the original ended with a bare `return`, so every caller
    # received None instead of the constructed matrix.
    return sc
def empirical_coverage(point, trace, width=90):
    """
    Calculate empirical coverage rates for all variables in a trace.

    Arguments
    ---------
    point : dict
        Dictionary mapping variable names to their true values.
        These names must be the same as in <trace>.
    trace : PyMC3 MultiTrace
        Posterior samples with variable names identical to <point>
    width : int
        Number from 0-100 indicating the nominal width of the credible
        intervals used to calculate the coverage.

    Returns
    -------
    results : dict
        Per-variable dicts with 'coverage' and 'size', plus a 'total'
        entry holding the size-weighted average coverage.
    """
    results = {}
    total_coverage = 0
    total_size = 0
    for rv in point.keys():
        # Best-effort: skip variables missing from the trace or whose values
        # don't expose a .shape.  Was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; `except Exception` keeps the
        # deliberate skip-on-failure behavior without that.
        try:
            true = point[rv]
            samples = trace[rv]
            # np.product was deprecated and removed in NumPy 2.0; np.prod is
            # the supported spelling with identical semantics.
            size = np.prod(true.shape)
            rv_coverage = coverage(samples, true, width=width)
            results[rv] = {'coverage': rv_coverage, 'size': size}
            total_coverage += rv_coverage * size
            total_size += size
        except Exception:
            pass
    results['total'] = {'coverage': total_coverage / total_size, 'size': total_size}
    return results
def tjur_r2(p, obs):
    """
    Compute the Tjur R-squared: the difference between the mean predicted
    probability over positive observations and over negative observations.

    Arguments
    ---------
    p : Numpy array
        Predicted probabilities generated by a model, same shape as <obs>
    obs : Numpy array
        Binary observations

    Returns
    -------
    r2 : float
        Tjur R-squared
    """
    mask = obs.astype(bool).ravel()
    flat_p = p.ravel()
    r2 = np.mean(flat_p[mask]) - np.mean(flat_p[~mask])
    return r2
def get_reordering(samples, true, C):
    """
    Calculate a reindexing that aligns <samples> with <true> along their
    first non-sample dimension, by maximizing the correlation between the
    posterior-mean vectors of <samples> and the vectors of <true>.

    Arguments
    ---------
    samples : 3D Numpy array
        Posterior estimates with shape [n_samples, C, n_variables2]
    true : 2D Numpy array
        True values with shape [C, n_variables2]
    C : int
        Number of vectors (rows) being matched

    Returns
    -------
    permutation : 1D Numpy array
        Indices which, applied to samples.mean(axis=0), rearrange it to
        share the same row ordering as <true>.
    """
    estimates = samples.mean(axis=0)
    # Cross-correlation block: entry [i, j] = corr(true_i, estimate_j)
    cross_corr = np.corrcoef(true, estimates)[0:C, C:2 * C]
    # Indicator of the best-matching true row for each estimated row;
    # inverting this (permutation) matrix yields its transpose, i.e. the
    # inverse assignment, whose column-wise argmax is the reordering.
    best_match = (cross_corr == cross_corr.max(axis=0)).astype(int)
    permutation = np.argmax(np.linalg.inv(best_match), axis=0)
    return permutation
def print_species(mean_phi, species_index,name_col='scientificname',
                  num_to_show=10):
    """
    Print off the main (highest-weighted) species for each community.

    Parameters
    ----------
    mean_phi : 2D Numpy array
        Posterior mean estimate of a community-species matrix with shape
        [n_communities, n_species].  (The code treats rows as communities:
        C = shape[0] and mean_phi[c, :] is ranked per community.)
    species_index : Pandas dataframe
        Pandas dataframe listing the integer index of a species along with
        its name
    name_col : string
        Name of the column in <species_index> which lists the name
    num_to_show : int
        Number of species to print for each community
    """
    C = mean_phi.shape[0]
    for c in range(C):
        # argsort is ascending, so the last num_to_show indices are the
        # highest-weighted species for this community
        top_indices = np.argsort(mean_phi[c,:])[-num_to_show::]
        top_species = species_index.iloc[top_indices][name_col].values
        print('\nSpecies for community {0}'.format(c))
        for species in top_species:
            print('\t',species)
def coefficient_plot(samples,x_labels,figsize =(5,3)):
"""
Generates a plot showing the relation between communities and regression
coefficients. Asterisks / stars indicate variables that are significant
at the 2 sigma level.
Parameters
----------
samples : 3D Numpy array
coefficient samples with shape [n_samples, n_covariates, n_communities]
x_labels : List of strings
Names of the covariates
figsize : tuple
Size of the desired plot
Returns
-------
fig : Matplotlib Figure
| |
# Authors: <NAME> <<EMAIL>>
#
# Copyright (C) 2006 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Classes for representing and manipulating interfaces.
"""
import copy
import itertools
from . import access
from . import refpolicy
from . import objectmodel
from . import matching
from .sepolgeni18n import _
class Param:
    """
    Object representing a parameter for an interface.
    """
    def __init__(self):
        # Validated through the `name` property below; stored name-mangled.
        self.__name = ""
        # Role this parameter plays in the policy; defaults to source type.
        self.type = refpolicy.SRC_TYPE
        # Object classes this parameter has been observed used with.
        self.obj_classes = refpolicy.IdSet()
        self.required = True

    def set_name(self, name):
        # Only identifiers of the $N form are valid parameter names.
        if not access.is_idparam(name):
            raise ValueError("Name [%s] is not a param" % name)
        self.__name = name

    def get_name(self):
        return self.__name

    name = property(get_name, set_name)

    # Numeric position of the parameter, e.g. "$1" -> 1.
    num = property(fget=lambda self: int(self.name[1:]))

    def __repr__(self):
        return "<sepolgen.policygen.Param instance [%s, %s, %s]>" % \
               (self.name, refpolicy.field_to_str[self.type], " ".join(self.obj_classes))
# Helper for extract perms
def __param_insert(name, type, av, params):
    """Record (or merge) a parameter usage in the params dict.

    Returns 0 on a clean insert/merge and 1 when a likely conflict with an
    existing entry was detected.  (Note: the early identical-entry path
    returns None; callers only ever test the result against 1 or truthiness.)
    """
    ret = 0
    if name in params:
        p = params[name]
        # The entries are identical - we're done
        if type == p.type:
            return
        # Handle implicitly typed objects (like process)
        if (type == refpolicy.SRC_TYPE or type == refpolicy.TGT_TYPE) and \
           (p.type == refpolicy.TGT_TYPE or p.type == refpolicy.SRC_TYPE):
            #print name, refpolicy.field_to_str[p.type]
            # If the object is not implicitly typed, tell the
            # caller there is a likely conflict.
            ret = 1
            if av:
                avobjs = [av.obj_class]
            else:
                avobjs = []
            for obj in itertools.chain(p.obj_classes, avobjs):
                if obj in objectmodel.implicitly_typed_objects:
                    ret = 0
                    break
            # "Promote" to a SRC_TYPE as this is the likely usage.
            # We do this even if the above test fails on purpose
            # as there is really no sane way to resolve the conflict
            # here. The caller can take other actions if needed.
            p.type = refpolicy.SRC_TYPE
        else:
            # There is some conflict - no way to resolve it really
            # so we just leave the first entry and tell the caller
            # there was a conflict.
            ret = 1
    else:
        p = Param()
        p.name = name
        p.type = type
        params[p.name] = p

    if av:
        p.obj_classes.add(av.obj_class)
    return ret
def av_extract_params(av, params):
    """Extract the parameters from an access vector.

    Extract the parameters (in the form $N) from an access
    vector, storing them as Param objects in a dictionary.
    Some attempt is made at resolving conflicts with other
    entries in the dict, but if an unresolvable conflict is
    found it is reported to the caller.

    The goal here is to figure out how interface parameters are
    actually used in the interface - e.g., that $1 is a domain used as
    a SRC_TYPE. In general an interface will look like this:

    interface(`foo', `
       allow $1 foo : file read;
    ')

    This is simple to figure out - $1 is a SRC_TYPE. A few interfaces
    are more complex, for example:

    interface(`foo_trans',`
       domain_auto_trans($1,fingerd_exec_t,fingerd_t)

       allow $1 fingerd_t:fd use;
       allow fingerd_t $1:fd use;
       allow fingerd_t $1:fifo_file rw_file_perms;
       allow fingerd_t $1:process sigchld;
    ')

    Here the usage seems ambiguous, but it is not. $1 is still domain
    and therefore should be returned as a SRC_TYPE.

    Returns:
      0  - success
      1  - conflict found
    """
    ret = 0
    if access.is_idparam(av.src_type):
        if __param_insert(av.src_type, refpolicy.SRC_TYPE, av, params) == 1:
            ret = 1
    if access.is_idparam(av.tgt_type):
        if __param_insert(av.tgt_type, refpolicy.TGT_TYPE, av, params) == 1:
            ret = 1
    if access.is_idparam(av.obj_class):
        if __param_insert(av.obj_class, refpolicy.OBJ_CLASS, av, params) == 1:
            ret = 1
    for perm in av.perms:
        if access.is_idparam(perm):
            # BUG FIX: was `__param_insert(perm, PERM)` -- PERM is undefined
            # (NameError at runtime) and the av/params arguments were missing.
            # Upstream sepolgen passes refpolicy.PERM with the full signature.
            if __param_insert(perm, refpolicy.PERM, av, params) == 1:
                ret = 1
    return ret
def role_extract_params(role, params):
    """Extract a $N parameter from a role statement.

    Returns a falsy value (0) on success and 1 when __param_insert reports
    a conflict.  The original implicitly returned None for non-parameter
    roles; returning 0 keeps truthiness identical for callers while
    matching the other *_extract_params helpers.
    """
    if access.is_idparam(role.role):
        return __param_insert(role.role, refpolicy.ROLE, None, params)
    return 0
def type_rule_extract_params(rule, params):
    """Extract $N parameters from a type rule's source types, target types,
    object classes and destination type.

    Returns 1 if any insertion reported a conflict, 0 otherwise.
    """
    def extract_from_set(type_set, field):
        # Renamed from `set`/`type`: the original parameter names shadowed
        # the builtins of the same name.
        found = 0
        for x in type_set:
            if access.is_idparam(x):
                if __param_insert(x, field, None, params):
                    found = 1
        return found

    ret = 0
    if extract_from_set(rule.src_types, refpolicy.SRC_TYPE):
        ret = 1
    if extract_from_set(rule.tgt_types, refpolicy.TGT_TYPE):
        ret = 1
    if extract_from_set(rule.obj_classes, refpolicy.OBJ_CLASS):
        ret = 1
    if access.is_idparam(rule.dest_type):
        if __param_insert(rule.dest_type, refpolicy.DEST_TYPE, None, params):
            ret = 1
    return ret
def ifcall_extract_params(ifcall, params):
    """Extract $N parameters from an interface call's arguments.

    Every parameter argument is recorded as a SRC_TYPE -- a fairly safe
    assumption for most interfaces.  All arguments are always processed
    (no early exit), so every parameter is recorded even after a conflict.
    Returns 1 if any insertion reported a conflict, 0 otherwise.
    """
    conflict = 0
    for arg in ifcall.args:
        if not access.is_idparam(arg):
            continue
        if __param_insert(arg, refpolicy.SRC_TYPE, None, params):
            conflict = 1
    return conflict
class AttributeVector:
    """An attribute name together with the set of access it grants."""
    def __init__(self):
        # Name of the attribute.
        self.name = ""
        # Access vectors accumulated for this attribute.
        self.access = access.AccessVectorSet()

    def add_av(self, av):
        # Merge one access vector into this attribute's access set.
        self.access.add_av(av)
class AttributeSet:
    """Collection of AttributeVector objects keyed by attribute name."""
    def __init__(self):
        # Map of attribute name -> AttributeVector.
        self.attributes = { }

    def add_attr(self, attr):
        self.attributes[attr.name] = attr

    def from_file(self, fd):
        """Load attributes from a text stream.

        Expected format: a "[Attribute <name>]" header line followed by one
        comma-separated access vector per line.  Raises SyntaxError on a
        malformed header.  (NOTE: `line[0]` assumes no empty lines -- an
        empty line would raise IndexError; preserved as-is.)
        """
        def parse_attr(line):
            fields = line[1:-1].split()
            if len(fields) != 2 or fields[0] != "Attribute":
                raise SyntaxError("Syntax error Attribute statement %s" % line)
            a = AttributeVector()
            a.name = fields[1]
            return a

        a = None
        for line in fd:
            # strip the trailing newline
            line = line[:-1]
            if line[0] == "[":
                # a new header: flush the previous attribute, if any
                if a:
                    self.add_attr(a)
                a = parse_attr(line)
            elif a:
                l = line.split(",")
                av = access.AccessVector(l)
                a.add_av(av)
        # flush the final attribute
        if a:
            self.add_attr(a)
class InterfaceVector:
    """Summary of a single interface: the access it enables and the $N
    parameters it uses.
    """
    def __init__(self, interface=None, attributes=None):
        """Optionally populate from a refpolicy interface object.

        BUG FIX: `attributes` previously defaulted to a mutable `{}`;
        None has identical falsy behavior without the shared-mutable-default
        pitfall.  When provided, `attributes` is expected to be an
        AttributeSet (it is only read, never mutated, here).
        """
        # Enabled is a loose concept currently - we are essentially
        # not enabling interfaces that we can't handle currently.
        # See InterfaceVector.add_ifv for more information.
        self.enabled = True
        self.name = ""
        # The access that is enabled by this interface - eventually
        # this will include indirect access from typeattribute
        # statements.
        self.access = access.AccessVectorSet()
        # Parameters are stored in a dictionary (key: param name
        # value: Param object).
        self.params = { }
        if interface:
            self.from_interface(interface, attributes)
        self.expanded = False

    def from_interface(self, interface, attributes=None):
        """Populate this vector from a refpolicy interface object,
        optionally expanding typeattribute access via `attributes`.
        """
        self.name = interface.name

        # Add allow rules
        for avrule in interface.avrules():
            if avrule.rule_type != refpolicy.AVRule.ALLOW:
                continue
            # Handle some policy bugs
            if "dontaudit" in interface.name:
                #print "allow rule in interface: %s" % interface
                continue
            avs = access.avrule_to_access_vectors(avrule)
            for av in avs:
                self.add_av(av)

        # Add typeattribute access
        if attributes:
            for typeattribute in interface.typeattributes():
                for attr in typeattribute.attributes:
                    if attr not in attributes.attributes:
                        # print "missing attribute " + attr
                        continue
                    attr_vec = attributes.attributes[attr]
                    for a in attr_vec.access:
                        av = copy.copy(a)
                        # substitute the concrete type for the attribute name
                        if av.src_type == attr_vec.name:
                            av.src_type = typeattribute.type
                        if av.tgt_type == attr_vec.name:
                            av.tgt_type = typeattribute.type
                        self.add_av(av)

        # Extract parameters from roles
        for role in interface.roles():
            if role_extract_params(role, self.params):
                pass
                #print "found conflicting role param %s for interface %s" % \
                #    (role.name, interface.name)
        # Extract parameters from type rules
        for rule in interface.typerules():
            if type_rule_extract_params(rule, self.params):
                pass
                #print "found conflicting params in rule %s in interface %s" % \
                #    (str(rule), interface.name)
        # Extract parameters from interface calls
        for ifcall in interface.interface_calls():
            if ifcall_extract_params(ifcall, self.params):
                pass
                #print "found conflicting params in ifcall %s in interface %s" % \
                #    (str(ifcall), interface.name)

    def add_av(self, av):
        """Add one access vector, extracting its parameters as a side effect."""
        if av_extract_params(av, self.params) == 1:
            pass
            #print "found conflicting perms [%s]" % str(av)
        self.access.add_av(av)

    def to_string(self):
        s = []
        s.append("[InterfaceVector %s]" % self.name)
        for av in self.access:
            s.append(str(av))
        return "\n".join(s)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return "<InterfaceVector %s:%s>" % (self.name, self.enabled)
class InterfaceSet:
    def __init__(self, output=None):
        # Map of interface name -> InterfaceVector.
        self.interfaces = { }
        # Target-type lookup structures (populated outside this view).
        self.tgt_type_map = { }
        self.tgt_type_all = []
        # Optional stream for diagnostic messages (see o()).
        self.output = output
    def o(self, str):
        # Write one diagnostic line to the output stream, if one was given.
        if self.output:
            self.output.write(str + "\n")
    def to_file(self, fd):
        """Serialize all interface vectors to a text stream.

        Interfaces are written sorted by name, each as an
        "[InterfaceVector <name> <param>:<type> ... ]" header line followed
        by one comma-joined access vector per line.
        """
        for iv in sorted(self.interfaces.values(), key=lambda x: x.name):
            fd.write("[InterfaceVector %s " % iv.name)
            for param in sorted(iv.params.values(), key=lambda x: x.name):
                fd.write("%s:%s " % (param.name, refpolicy.field_to_str[param.type]))
            fd.write("]\n")
            avl = sorted(iv.access.to_list())
            for av in avl:
                fd.write(",".join(av))
                fd.write("\n")
def from_file(self, fd):
def parse_ifv(line):
fields = line[1:-1].split()
if len(fields) < 2 or fields[0] != "InterfaceVector":
raise SyntaxError("Syntax error InterfaceVector statement %s" % line)
ifv = InterfaceVector()
ifv.name = fields[1]
if len(fields) == 2:
return
for field in fields[2:]:
p = field.split(":")
if len(p) != 2:
raise SyntaxError("Invalid param in InterfaceVector statement %s" % line)
param = Param()
param.name = p[0]
param.type = refpolicy.str_to_field[p[1]]
ifv.params[param.name] = param
return ifv
ifv = None
for line in fd:
line = line[:-1]
if | |
# import your word dictionary here from another file
# The game is started (this function is called) by the last line in this file
# TODO: part 1
def init_wordle_ai(max_guesses=10):
    """Initialize the global game state, then start the gameplay loop.

    Parameters
    ----------
    max_guesses : int, optional
        Maximum number of guesses the solver is allowed before the game
        ends in failure (default 10).  This implements the "bonus points"
        suggestion from the original exercise: the cap is configurable via
        a parameter instead of hard-coded, and the default preserves the
        previous behavior.

    Returns
    -------
    Whatever gameplay_loop() returns when the game finishes.
    """
    # Global variables can be read anywhere in this file; to *assign* one
    # inside a function you must first declare it with `global`.
    global num_guesses
    num_guesses = 0  # how many guesses the computer has made so far

    global answer  # the secret word; picking it is still a student TODO below

    # MAX_GUESSES is used like a CONSTANT elsewhere: define it once here so
    # the game's behavior can be changed in one place (or via the parameter)
    # instead of hunting for a hard-coded `10` throughout the code.
    global MAX_GUESSES
    MAX_GUESSES = max_guesses

    # TODO: Randomly pick a word from the wordle dictionary, print it out,
    # and assign it to `answer`.

    # Now we go to the main playing portion.
    return gameplay_loop()
# what's the gameplay cycle for Wordle?
# 1. Guess a word
# 2. get assigned colors for each letter based on a comparison to the answer
# 3. keep guessing until you get it correct or run out of guesses
# To start with, let's implement a "dumb" wordle solver. Have it guess random dictionary words.
# If it somehow finds the answer, have it finish the game.
# Make the game end after a certain amount of guesses (maybe 10) otherwise you'll be here all day guessing 13,000 words.
# After each guess, return a list of colors for each position:
# Black = this letter isn't in the answer
# Yellow = this letter is in the answer but isn't in the right position
# Green = correct letter in correct spot
# We won't be using the colors yet, but implement the functionality for it here
# TODO part 2
def gameplay_loop():
    """Run the main Wordle guessing cycle (skeleton — the student fills in the loop).

    Returns None for now; once implemented it loops until the computer guesses
    the answer or runs out of guesses.
    """
    print("Start game!")
    # The gameplay "cycle" is a LOOP. Pick a loop type that runs forever until
    # we break out of it in one of two cases: the guess is correct, or we have
    # used up all our guesses.
    # Inside the loop:
    #   1. pick a random dictionary word and name it `guess`
    #   2. then uncomment this line:
    # guess_colors = guess_word(guess)
    #      (come back after finishing the guess_word() function)
    #   3. uncomment the next line and go implement print_guess():
    # print_guess(guess_colors, guess)
    #   4. with guess_colors returned, check whether every position is green.
    #      If so: break out, print that the computer won, and quit the program.
    #      Otherwise do nothing and let the loop repeat.
    # Finally: how do we stop when the maximum number of guesses is exceeded?
    # In that case print a failure message (the computer didn't find the answer
    # within the allotted guesses) and quit the program.
# TODO part 3
# In this function, we want to check the guess against the answer, and RETURN a list of associated colors (black, yellow or green)
def guess_word(guess):
    """Compare `guess` against the answer and (once implemented) return a
    dict mapping each letter position to its color.

    Skeleton: currently builds the empty color dict and returns None.
    """
    # To EDIT a global from inside a different function we must re-declare it
    # with the `global` keyword; merely reading it would not require this.
    global num_guesses
    # One entry per letter position (0 = first letter, ... 4 = fifth letter).
    # The values will become "black", "yellow" or "green"; they start empty.
    # Dicts hold key/value pairs, e.g. {"key1": "value1"}; access a value with
    # my_dictionary["key1"].
    guess_colors = {position: "" for position in range(5)}
    # TODO (student):
    #   - print the guess number and the word, e.g. 'guess #1: flute'
    #   - if the guess equals the answer exactly: set every color to green
    #     and return guess_colors
    #   - otherwise compare letter-by-letter (guess[0] vs answer[0], etc.)
    #     and assign the proper color to each position
# TODO part 4
# we know we want to print the answer many times throughout the program, so lets make it a function so we only have to write the code for that in 1 place!
# print the colors for each letter in the last guess
# bonus points: print them on the same line, like wordle
def print_guess(guess_colors, guess=None):
    """Print the color for each letter of the last guess.

    Fix: the tutorial's suggested call site is ``print_guess(guess_colors, guess)``
    (two arguments), but the original signature only accepted one, so uncommenting
    that line would raise TypeError. ``guess`` is optional, keeping one-argument
    calls working.

    :param guess_colors: dict mapping letter position -> color string
    :param guess: the guessed word itself (optional, for nicer output)
    """
    print("")  # placeholder body so Python accepts the function; student fills this in
# Huzzah! If you did all that, you should have a basic 'dumb' wordle solver! Run it and test it out. Use debug mode if things aren't working quite right to help find the issues
# TODO part 5
# Now for the `smart` solver...
# Instead of picking the next word randomly from the dictionary, call this function which will intelligently pick the next word
# Pass the colors in as a parameter.
# Pick a word from the dictionary and check against the colors and the last guess to see if it is a likely candidate for the next guess. this part can get tricky
# How would we get the last guess? Go back and figure out a way to get that value here
def pick_next_word():
    """Intelligently choose the next guess (skeleton — student implements).

    Should use the previous guess and its colors to filter the dictionary to
    plausible candidates. Currently just prints an empty line.
    """
    print("")
# TODO part 6 - some bonus improvements
# bonus points: COLOR each letter that you print so it looks like real wordle output! (hint: may need to download and import something for this. google it)
# bonus points for yellow which you'll need down the road:
# reference this picture https://www.reddit.com/r/wordle/comments/ry49ne/illustration_of_what_happens_when_your_guess_has/
# yellow assignment gets tricky with double letters...
# refactor (meaning: rewrite/improve) your yellow color assignment with this in mind
# I found out I needed to do this because occasionally it would get stuck in an endless loop and couldn't find the answer and this was the culprit
# bonus points: don't just compare "black" letters for the LAST word guessed, you can rule out "black" letters for all previous guesses too,
# to make your next guess even more accurate. find a way to do this...
# bonus points: run the program a thousand times and record the results, calculate the average number of guesses to solve it.
# (I don't mean click it and write down the results that many times :P you can | |
one with only `start` set in which
case the access is identical to :func:`Trie.itervalues` invocation with
prefix argument.
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo/bar'] = 'Bar'
>>> t['foo/baz'] = 'Baz'
>>> t['qux'] = 'Qux'
>>> t['foo/bar']
'Bar'
>>> list(t['foo':])
['Baz', 'Bar']
>>> t['foo']
Traceback (most recent call last):
...
pygtrie.ShortKeyError: 'foo'
Args:
key_or_slice: A key or a slice to look for.
Returns:
If a single key is passed, a value associated with given key. If
a slice is passed, a generator of values in specified subtrie.
Raises:
ShortKeyError: If the key has no value associated with it but is
a prefix of some key with a value. Note that
:class:`ShortKeyError` is subclass of :class:`KeyError`.
KeyError: If key has no value associated with it nor is a prefix of
an existing key.
TypeError: If ``key_or_slice`` is a slice but it's stop or step are
not ``None``.
"""
if self._slice_maybe(key_or_slice)[1]:
return self.itervalues(key_or_slice.start)
node, _ = self._get_node(key_or_slice)
if node.value is _SENTINEL:
raise ShortKeyError(key_or_slice)
return node.value
def _set(self, key, value, only_if_missing=False, clear_children=False):
    """Stores ``value`` under ``key`` and returns the node's resulting value.

    Args:
        key: Key whose node to update; the node is created if absent.
        value: Value to store.
        only_if_missing: When ``True``, an existing value is left untouched.
        clear_children: When ``True``, the node's whole subtrie is dropped.

    Returns:
        The value held by the node after the operation.
    """
    node, _ = self._get_node(key, create=True)
    value_missing = node.value is _SENTINEL
    if value_missing or not only_if_missing:
        node.value = value
    if clear_children:
        node.children.clear()
    return node.value
def __setitem__(self, key_or_slice, value):
    """Stores a value under a key, or replaces a whole subtrie.

    When ``key_or_slice`` is a plain key, the value is simply assigned.
    When it is a slice (only ``start`` may be set), any subtrie hanging off
    that key is discarded and replaced by a single node holding ``value``.

    Args:
        key_or_slice: The key to assign, or a slice selecting a subtrie
            to replace.
        value: Value to store.

    Raises:
        TypeError: If a slice with non-``None`` stop or step is given.
    """
    target_key, replace_subtrie = self._slice_maybe(key_or_slice)
    self._set(target_key, value, clear_children=replace_subtrie)
def setdefault(self, key, value=None):
    """Inserts ``value`` under ``key`` unless a value is already stored there.

    Unlike :func:`Trie.__setitem__`, slices are not accepted here.

    Returns:
        The value stored at ``key`` after the call (the pre-existing one
        when present, otherwise ``value``).
    """
    return self._set(key, value, only_if_missing=True)
@staticmethod
def _cleanup_trace(trace):
    """Removes empty nodes present on specified trace.

    Walks the trace bottom-up, unlinking each node that has become empty
    (falsy) from its parent, and stops at the first non-empty ancestor or
    at the root.

    Args:
        trace: Trace to the node to cleanup as returned by
            :func:`Trie._get_node`.
    """
    i = len(trace) - 1  # len(path) >= 1 since root is always there
    step, node = trace[i]
    # `not node` relies on the node type's truthiness — presumably an empty
    # node (no value, no children) is falsy; confirm against the node class.
    while i and not node:
        i -= 1
        parent_step, parent = trace[i]
        # Unlink the empty child from its parent, then continue upwards.
        del parent.children[step]
        step, node = parent_step, parent
def _pop_from_node(self, node, trace, default=_SENTINEL):
    """Detaches and returns the value stored on ``node``.

    Args:
        node: Node whose value to remove.
        trace: Trace to that node as returned by :func:`Trie._get_node`;
            emptied ancestors along it are pruned afterwards.
        default: Fallback returned when the node holds no value.

    Returns:
        The node's former value, or ``default`` when one was supplied.

    Raises:
        ShortKeyError: Node holds no value and no ``default`` was given.
    """
    if node.value is _SENTINEL:
        if default is _SENTINEL:
            raise ShortKeyError()
        return default
    removed = node.value
    node.value = _SENTINEL
    self._cleanup_trace(trace)
    return removed
def pop(self, key, default=_SENTINEL):
    """Removes and returns the value stored under ``key``.

    Args:
        key: Key to remove the value of.
        default: Optional fallback returned when ``key`` holds no value.

    Returns:
        The removed value, or ``default`` (when given) if nothing was found.

    Raises:
        ShortKeyError: No ``default`` given and ``key`` is only a prefix of
            stored keys (subclass of :class:`KeyError`).
        KeyError: No ``default`` given and ``key`` is absent entirely.
    """
    try:
        node, trace = self._get_node(key)
        return self._pop_from_node(node, trace)
    except KeyError:
        if default is _SENTINEL:
            raise
        return default
def popitem(self):
    """Deletes an arbitrary value from the trie and returns it.

    There is no guarantee as to which item is deleted and returned. Neither
    in respect to its lexicographical nor topological order.

    Returns:
        ``(key, value)`` tuple indicating deleted key.

    Raises:
        KeyError: If the trie is empty.
    """
    if not self:
        raise KeyError()
    node = self._root
    trace = [(None, node)]
    # Descend along the first child at each level until a node holding
    # a value is reached, recording each step so the key can be rebuilt
    # and the path pruned afterwards.
    while node.value is _SENTINEL:
        step = next(_iterkeys(node.children))
        node = node.children[step]
        trace.append((step, node))
    # trace[0] is the root (no step), hence trace[1:] spells out the key.
    return (self._key_from_path((step for step, _ in trace[1:])),
            self._pop_from_node(node, trace))
def __delitem__(self, key_or_slice):
    """Removes the value stored under a key, or an entire subtrie.

    A plain key removes only that key's value, leaving any descendants
    intact. A slice (with only ``start`` set) removes the whole subtrie
    rooted at the key, value included.

    Args:
        key_or_slice: A key to delete, or a slice selecting a subtrie.

    Raises:
        ShortKeyError: A plain key was given that stores no value but is
            a prefix of stored keys (subclass of :class:`KeyError`); never
            raised for slices, which always wipe the subtrie.
        KeyError: The key is not present at all.
        TypeError: A slice was given with non-``None`` stop or step.
    """
    target_key, drop_subtrie = self._slice_maybe(key_or_slice)
    node, trace = self._get_node(target_key)
    if drop_subtrie:
        node.children.clear()
    elif node.value is _SENTINEL:
        raise ShortKeyError(target_key)
    node.value = _SENTINEL
    self._cleanup_trace(trace)
def prefixes(self, key):
    """Yields ``(k, value)`` for every stored prefix of ``key``.

    Walks from the root towards ``key`` and reports each node encountered
    on the way that holds a value, including ``key`` itself when present.
    The walk stops early as soon as the path leaves the trie.

    Args:
        key: Key to walk towards.

    Yields:
        ``(k, value)`` pairs for the prefixes found, shortest first.
    """
    path = self.__path_from_key(key)
    node = self._root
    for pos, step in enumerate(path):
        if node.value is not _SENTINEL:
            yield self._key_from_path(path[:pos]), node.value
        node = node.children.get(step)
        if not node:
            return
    if node.value is not _SENTINEL:
        yield self._key_from_path(path), node.value
def shortest_prefix(self, key):
    """Finds the shortest prefix of a key with a value.

    Equivalent to taking the first item yielded by :func:`Trie.prefixes`;
    when that method yields nothing, a falsy ``(None, None)`` pair is
    returned instead of a regular tuple, so the result can be tested
    directly with ``bool``.

    Args:
        key: Key to look for.

    Returns:
        ``(k, value)`` for the shortest stored prefix of ``key`` (possibly
        ``key`` itself), or the falsy ``(None, None)`` pair when none exists.
    """
    for pair in self.prefixes(key):
        return pair
    return _NONE_PAIR
def longest_prefix(self, key):
"""Finds the longest prefix of a key with a value.
This is equivalent to taking the last object yielded by
:func:`Trie.prefixes` with a default of `(None, None)` if said method
yields no items. As an added bonus, the pair in that case will be
a falsy value (as opposed to regular two-element tuple of ``None``
values).
Example:
>>> import pygtrie
>>> t = pygtrie.StringTrie()
>>> t['foo'] = | |
% (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[0] += " " + p[5]
overhangs[0] += " " + p[4]
overhangs[0] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[1] += "%s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[1] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[1] += " " + p[6]
overhangs[1] += " " + p[5]
overhangs[1] += " %s %s %s" % (o[0]+x+ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[2] += "%s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[2] += " " + p[7]
overhangs[2] += " " + p[6]
overhangs[2] += " %s %s %s" % (o[0]+x+ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[2] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
interior.append(None)
overhangs.append("")
overhangs[3] += "%s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
overhangs[3] += " " + p[4]
overhangs[3] += " " + p[7]
overhangs[3] += " %s %s %s" % (o[0]-ovhx, o[1]+y+ovhy, o[2]+z-ovhz)
overhangs[3] += " %s %s %s" % (o[0]-ovhx, o[1]-ovhy, o[2]+z-ovhz)
interior.append(None)
eaves = o[2]+z-ovhz
elif rtype == 'Flat':
overhangs.append("")
overhangs[0] += "%s %s %s" % (o[0]-ovhx,o[1]-ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx,o[1]-ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]+x+ovhx,o[1]+y+ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]-ovhx,o[1]+y+ovhy,o[2]+z)
overhangs[0] += " %s %s %s" % (o[0]-ovhx,o[1]-ovhy,o[2]+z)
interior.append("")
interior[0] += p[4]
interior[0] += " " + p[7]
interior[0] += " " + p[6]
interior[0] += " " + p[5]
interior[0] += " " + p[4]
eaves = o[2]+z
ovhy_recalculated = ovhy
#-- Overhang points
return overhangs, interior, eaves, ovhy_recalculated
def wallOpeningOrganiser(openings):
    """Divide the openings per wall.

    Fix: the original used a bare ``except:`` which silently swallowed every
    error (including KeyboardInterrupt); it is narrowed to the lookup/parsing
    failures that the ``'side'`` fallback is actually meant to cover.

    :param openings: ``[door, windows]`` where ``door`` is a dict (or ``''``
        when absent) and ``windows`` is a list of dicts; each dict holds the
        wall index under ``'wall'`` (or legacy ``'side'``) and a ``'ring'``.
    :return: ``(holes, opns)`` — per-wall list of rings, and per-wall
        ``[door, [windows...]]`` pairs; ``(None, None)`` when ``openings``
        is empty/falsy.
    """
    if not openings:
        return None, None
    holes = [[], [], [], []]
    # opns[wall] == [door-or-empty-list, list-of-windows]
    opns = [[[], []] for _ in range(4)]
    door = openings[0]
    if door != '':
        doorwall = int(door['wall'])
        holes[doorwall].append(door['ring'])
        opns[doorwall][0] = door
    for opening in openings[1]:
        try:
            wall = int(opening['wall'])
        except (KeyError, TypeError, ValueError):
            # Older datasets store the wall index under 'side' instead.
            wall = int(opening['side'])
        holes[wall].append(opening['ring'])
        opns[wall][1].append(opening)
    return holes, opns
def GMLPointList(point):
    """Translates the list of coordinates of one point to a string representation (GML)."""
    return "%s %s %s" % (point[0], point[1], point[2])
def multiGMLPointList(points):
    """Translates the list of multiple points to a string representation (GML)."""
    return " ".join(GMLPointList(pt) for pt in points)
def GMLstring2points(pointstring):
    """Converts the list of points in string (GML) to a list.

    Coordinates stay strings; each resulting entry is an ``[x, y, z]`` triple.
    """
    coords = pointstring.split()
    assert len(coords) % 3 == 0
    return [coords[i:i + 3] for i in range(0, len(coords), 3)]
def GMLreverser(pointlist):
    """Reverses the order of the points, i.e. the normal of the ring."""
    return list(reversed(pointlist))
def GMLreversedRing(r):
    """Reverses a ring given as a GML position string."""
    return multiGMLPointList(GMLreverser(GMLstring2points(r)))
def dormerVertices(dormers, p, h, rtype, oList, width):
    """Computes the vertices of a dormer.

    For each dormer descriptor, six corner points (indices 0-5) are derived
    from the roof geometry. ``drm['origin']`` and ``drm['size']`` appear to be
    (along-wall, up-roof) pairs — TODO confirm against the caller.

    Args:
        dormers: Dormer descriptors with 'side', 'origin' and 'size'.
        p: Corner points of the building solid (indexed 0-7).
        h: Roof height used to project roof-plane offsets.
        rtype: Roof type: 'Gabled', 'Shed', 'Hipped', 'Pyramidal' or 'Flat'.
        oList: [origin, x, y, z] extents of the building solid.
        width: Used for the y-offsets of Hipped/Pyramidal sides 0 and 2.

    Returns:
        (dList, dListGML): per-dormer lists of the six vertices, as raw
        coordinate triples and as GML position strings.
    """
    [o, x, y, z] = oList
    dList = []
    dListGML = []
    for drm in dormers:
        d = [[], [], [], [], [], []]
        dGML = [[], [], [], [], [], []]
        if rtype == 'Gabled':
            # Horizontal setback of the dormer face, projected on the roof slope.
            xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
            xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
            if drm['side'] == 1:
                d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
                d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
                d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
            elif drm['side'] == 3:
                d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
                d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1]]
                d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
        elif rtype == 'Shed':
            # Shed roofs span the full x extent, hence the 1.0 factor.
            xperimiter = (float(drm['origin'][1]) * x * 1.0) / h
            xperimiter2 = (float(drm['size'][1]) * x * 1.0) / h + xperimiter
            if drm['side'] == 1:
                d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
                d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
                d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
        elif rtype == 'Hipped' or rtype == 'Pyramidal':
            # Hipped/Pyramidal slopes need both x and y projections (sides 0/2
            # use the y-offsets derived from `width`).
            xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
            xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
            yperimiter = (float(drm['origin'][1]) * width) / h
            yperimiter2 = (float(drm['size'][1]) * width) / h + yperimiter
            if drm['side'] == 1:
                d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
                d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1]]
                d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
            elif drm['side'] == 3:
                d[1] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1]]
                d[2] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1]]
                d[4] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[4][0]+xperimiter, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[4][0]+xperimiter2, p[7][1] - float(drm['origin'][0]) - float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
            elif drm['side'] == 0:
                d[1] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1]]
                d[2] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1]]
                d[4] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[4][0]+float(drm['origin'][0]), p[4][1] + yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[4][0]+float(drm['origin'][0])+float(drm['size'][0]), p[4][1] + yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
            elif drm['side'] == 2:
                d[1] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1]]
                d[2] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1]]
                d[4] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[6][0]-float(drm['origin'][0]), p[6][1] - yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[3] = [p[6][0]-float(drm['origin'][0])-float(drm['size'][0]), p[6][1] - yperimiter2, p[5][2] + drm['origin'][1] + float(drm['size'][1])]
        elif rtype == 'Flat':
            #-- Valid only for roof windows
            xperimiter = float(drm['origin'][1])
            xperimiter2 = float(drm['size'][1]) + xperimiter
            if drm['side'] == 1:
                d[1] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2]]
                d[2] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
                d[4] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[5] = [p[1][0]-xperimiter, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2] + drm['origin'][1] + float(drm['size'][1])]
                d[0] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]), p[5][2]]
                d[3] = [p[1][0]-xperimiter2, p[1][1] + float(drm['origin'][0]) + float(drm['size'][0]), p[5][2]]
        # Only emit the dormer when one of the branches above actually
        # produced vertices (unknown rtype/side combinations are skipped).
        if d != [[], [], [], [], [], []]:
            for i in range(0, 6):
                #for j in range(0, 3):
                #    d[i][j] = round(d[i][j], 2)
                dGML[i] = GMLPointList(d[i])
            dList.append(d)
            dListGML.append(dGML)
    return dList, dListGML
def interiordormerVertices(dormers, p, h, rtype, oList, width, wallThickness, rWth, dormerTickness, topThickness, rWth2=None):
"""Computes the vertices of a dormer."""
[o, x, y, z] = oList
dList = []
dListGML = []
for drm in dormers:
d = [[], [], [], [], [], []]
dGML = [[], [], [], [], [], []]
if rtype == 'Gabled':
xperimiter = (float(drm['origin'][1]) * x * 0.5) / h
xperimiter2 = (float(drm['size'][1]) * x * 0.5) / h + xperimiter
intxper = (xperimiter + dormerTickness) - rWth
| |
<gh_stars>1-10
"""
..
/------------------------------------------------------------------------------\
| -- FACADE TECHNOLOGIES INC. CONFIDENTIAL -- |
|------------------------------------------------------------------------------|
| |
| Copyright [2019] Facade Technologies Inc. |
| All Rights Reserved. |
| |
| NOTICE: All information contained herein is, and remains the property of |
| Facade Technologies Inc. and its suppliers if any. The intellectual and |
| and technical concepts contained herein are proprietary to Facade |
| Technologies Inc. and its suppliers and may be covered by U.S. and Foreign |
| Patents, patents in process, and are protected by trade secret or copyright |
| law. Dissemination of this information or reproduction of this material is |
| strictly forbidden unless prior written permission is obtained from Facade |
| Technologies Inc. |
| |
\------------------------------------------------------------------------------/
This document contains the BaseApplication class
"""
import sys
import os
import time as t
import json
import psutil
import pywinauto
import pyautogui
import traceback
from datetime import datetime
from typing import Set
# Resolve the execution context: callers may predefine CONTEXT before this
# module runs; otherwise fall back to the packaged environment value.
if 'CONTEXT' not in locals():
    from libs.env import CONTEXT
# Fix: the original tested `CONTEXT in ("API")` — ("API") is a plain string,
# so `in` performed a substring check (e.g. "P" would match). One-element
# tuples restore the intended membership test.
if CONTEXT in ("API",):
    # from .tguiil.tokens import Token
    # from .tguiil.application import Application
    # from .tguiil.matchoption import MatchOption
    # from .tguiil.componentfinder import ComponentFinder
    # from .data.tguim.targetguimodel import TargetGuiModel
    # from .data.tguim.visibilitybehavior import VisibilityBehavior
    pass
elif CONTEXT in ("Sphinx",):
    from tguiil.tokens import Token
    from tguiil.application import Application
    from tguiil.matchoption import MatchOption
    from tguiil.componentfinder import ComponentFinder
    from data.tguim.targetguimodel import TargetGuiModel
    from data.tguim.visibilitybehavior import VisibilityBehavior
else:
    raise Exception(f"Invalid context: {CONTEXT}")

# Make this file's directory importable so sibling modules resolve.
pathToThisFile, thisFile = os.path.split(os.path.abspath(__file__))
sys.path.insert(0, pathToThisFile)
class WaitException(Exception):
    """Raised by :meth:`BaseApplication.wait` for an invalid state or duration."""

    def __init__(self, msg: str):
        super().__init__(msg)
class BaseApplication:
"""
The core of all Facile APIs: contains functions that are necessary for any API. The
custom generated Application class inherits from this.
"""
def __init__(self, exeLoc: str, options: Set['MatchOption'], name: str, reqCompIds: list, backend: str = 'uia'):
    """
    Initializes a BaseApplication instance.

    :param exeLoc: filepath to executable for target application
    :type exeLoc: str
    :param options: options to use when searching for a component
    :type options: set
    :param name: project name (necessary for opening tguim file)
    :type name: str
    :param reqCompIds: a list of required components' IDs
    :type reqCompIds: list
    :param backend: backend type
    :type backend: str
    """
    # Note that app is a custom Desktop instance from pywinauto, not an application instance.
    self.app = Application(backend=backend)
    self._isRunning = False
    self._options = options
    self._exeLoc = exeLoc
    self._name = name
    self._compFinder = ComponentFinder(self.app, self._options)
    # _pathMap: component ID -> (supertoken path from window, cached handle or None);
    # populated by _generatePathMap() below.
    self._pathMap = {}
    self._compIDs = reqCompIds
    # Best-effort load of the target GUI model from alongside this file;
    # on any failure _tgm stays None (later lookups will then fail loudly).
    try:
        with open(os.path.join(pathToThisFile, "tguim.json"), 'r') as tguimFile:
            d = json.loads(tguimFile.read())
            self._tgm = TargetGuiModel.fromDict(d)
    except Exception as e:
        print("Couldn't load from ./tguim.json")
        self._tgm = None
        traceback.print_exc()
    self._generatePathMap()
def _startApp(self):
    """
    Starts the target application, then waits for all processes' active window to be ready.
    Does nothing (beyond a notice) when the app was already started.
    """
    if self._isRunning:
        print('Your app is already running. If you want more instances, make another application instance '
              '(myApp1 = Application(), myApp2 = Application())')
        return
    self.app.start(self._exeLoc)
    self._isRunning = True
def stop(self):
    """
    Stops the target application and the processes spawned by it.
    Already-dead processes are ignored.
    """
    if not self._isRunning:
        print('Your app should not be running. If it is, please report this as a bug on our website.')
        return
    try:
        self.app.kill()
    except psutil.NoSuchProcess:
        # Process already exited on its own — nothing left to kill.
        pass
def pause(self, demo=False):
    """
    Pauses execution while the user interacts with their app.

    :param demo: when True, shows the first-run demo message instead.
    """
    if demo:
        pyautogui.alert('If this is your first time running a script with your API, open automate.py '
                        'to check out how you can get the most out of your application.\n\n'
                        'Press "OK" to close this and your application.')
    else:
        pyautogui.alert('Execution paused. Press "OK" when ready to continue.')
def wait(self, state: str, timeout: int = 10):
    """
    Pauses until state is reached for each process's active window, timing out in timeout seconds.
    Useful when waiting for target app to complete execution of a task, or when starting up.
    Wraps around pywinauto's wait function.

    Fixes: the failure is now raised with ``from e`` so the original cause is
    preserved, and the message no longer embeds raw source indentation (the
    original string used a backslash line-continuation inside the literal).

    :param state: state to wait for ('visible', 'ready', 'exists', 'enabled', 'active'), or time to wait in s or m
    :type state: str
    :param timeout: Maximum number of seconds to wait for state to be reached. Defaults to a minute, should be longer for apps with more windows.
    :type timeout: float
    """
    try:
        if ' s' in state:
            t.sleep(float(state[:-2]))
        elif ' m' in state:
            t.sleep(60 * float(state[:-2]))
        else:
            self.app.wait(state, timeout)
    except Exception as e:
        raise WaitException(
            'Not a valid wait time or state. Please use "x s" or "x m" for x seconds/minutes '
            'respectively, or use one of "visible", "ready", "exists", "enabled", "active" '
            'as state to wait for.') from e
def _generatePathMap(self):
    """
    Creates a map of component ID to (supertoken path, handle) tuples, where handle is initialized to None.
    """
    for compID in self._compIDs:
        component = self._getComponentObject(compID)
        # Supertokens along the root path, root itself excluded, then
        # reordered so the window comes first and deeper children follow.
        superTokens = [c.getSuperToken() for c, _ in component.getPathFromRoot()[:-1]]
        superTokens.reverse()
        self._pathMap[compID] = (superTokens, None)
def _findComponent(self, compID: int) -> 'pywinauto.base_wrapper.BaseWrapper':
    """
    Finds the component with ID compID forcing its appearance if not visible.

    A cached handle is reused only while it is still visible; otherwise the
    component is re-shown, re-located, and the cache refreshed.

    :param compID: ID of component to find.
    :type compID: int
    :return: handle to component
    :rtype: pywinauto.base_wrapper.BaseWrapper
    """
    path, cached = self._pathMap[compID]
    if cached and cached.is_visible():
        return cached
    component = self._getComponentObject(compID)
    self._forceShow(component)
    handle = self._compFinder.find(component.getSuperToken(), path)
    self._pathMap[compID] = (path, handle)
    return handle
def _getComponentObject(self, compID: int) -> 'Component':
    """
    Gets the Component object for an item with ID compID. Handles possible errors with getting it.

    Fix: the original wrapped every failure (including its own raise) as
    ``Exception(str(e))``, discarding the traceback; failures are now
    chained with ``from e`` and the not-found case is raised directly.

    :param compID: ID of component to get the Component object for
    :type compID: int
    :return: The component item for an item with ID compID
    :rtype: Component
    """
    try:
        comp = self._tgm.getComponent(compID)
    except Exception as e:
        # Same message/type as before, but keep the original cause attached.
        raise Exception(str(e)) from e
    if comp:
        return comp
    raise Exception("Could not get component " + str(compID) + " from TGUIM.")
def _getWindowObjectIDFromHandle(self, winHandle: pywinauto.base_wrapper.BaseWrapper) -> 'Component':
    """
    Gets the Component object for a component with handle compHandle.
    ** ONLY WORKS FOR WINDOWS ** (Can be modified for more, but implemented this way to save processing power/time.

    Returns the matching component's ID, or implicitly None when no stored
    window matches the handle.

    :param winHandle: handle of window to get Component object for
    :type winHandle: pywinauto.base_wrapper.BaseWrapper
    :return: The Component object for an item with handle compHandle
    :rtype: Component
    """
    # Create Token for handle
    timeStamp = datetime.now()
    token = Token.createToken(timeStamp, winHandle, captureImage=False)
    # determine if the new token matches any super tokens and how well it matches if it does.
    bestMatch = 0
    # NOTE(review): bestDecision is updated below but never read afterwards —
    # looks like leftover bookkeeping; confirm before removing.
    bestDecision = Token.Match.NO.value
    selectedComponent = None
    potentialMatches = []
    comps = self._tgm.getComponents()
    # Only components whose first token is a dialog (i.e. windows) are candidates.
    for id in comps:
        comp = comps[id]
        st = comp.getSuperToken()
        if st.tokens[0].isDialog:
            potentialMatches.append((st, comp))
    for superToken, comp in potentialMatches:
        decision, matchVal = superToken.shouldContain(token)
        bestDecision = min(bestDecision, decision.value)
        if decision.value == Token.Match.NO.value:
            continue
        elif decision.value == Token.Match.EXACT.value:
            # Exact match wins immediately.
            return comp.getId()
        elif decision.value == Token.Match.CLOSE.value:
            # in the case that multiple SuperTokens closely match the token,
            # we'll use the SuperToken that has the higher match.
            if matchVal > bestMatch:
                bestMatch = matchVal
                selectedComponent = comp
    # returning no matter what: if selected Comp is none, we don't care, we just return none since this is only used
    # for getting the target windows that have already been defined in Facile
    if selectedComponent:
        return selectedComponent.getId()
def _forceShow(self, compObj: 'Component'):
"""
Attempts to force the component to be visible using visibility behaviors.
Uses a simple breadth-first search algorithm.
:param compObj: Component to show
:type compObj: Component
:return: None
"""
# --- This may be useful for other search algorithms later --- #
# # First, get all necessary lists.
# # The instantiated Visibility Behaviors (These are the edges)
# visBs = self._tgm.getVisibilityBehaviors()
# # All top-level windows in the TGUIM (These are the nodes)
# tlwComps = self._tgm.getTopLevelWindows()
# ------------------------------------------------------------ #
# Get the window that we want to show. This is the starting point.
startWindow, pos = compObj.getPathFromRoot()[-2] # -1 position is root, -2 is window
# Get the currently active windows, which are the algorithm's targets.
# We want the component objects for these, not the actual handles.
targets = []
handles = self.app.windows()
while handles:
handle = | |
<filename>sysopt/symbolic/symbols.py
"""Functions and factories to create symbolic variables."""
import weakref
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from inspect import signature
import numpy as np
from scipy.sparse import dok_matrix, spmatrix
from typing import Union, List, Callable, Tuple, Optional
import sysopt.backends as backend
from sysopt.backends import SymbolicVector
epsilon = 1e-12
def find_param_index_by_name(block, name: str):
    """Return the index of parameter `name` on `block`.

    Tries the block's `find_by_name` lookup first, then a plain list
    search; raises ValueError when neither locates the parameter.
    """
    for lookup in (
        lambda: block.find_by_name('parameters', name),
        lambda: block.parameters.index(name),
    ):
        try:
            return lookup()
        except ValueError:
            continue
    raise ValueError(f'Could not find parameter {name} in block {block}.')
def sparse_matrix(shape: Tuple[int, int]):
    """Create an empty float-valued DOK sparse matrix of the given shape."""
    matrix = dok_matrix(shape, dtype=float)
    return matrix
def is_symbolic(obj):
    """True when `obj` reports itself symbolic; defers to the backend otherwise."""
    try:
        return obj.is_symbolic
    except AttributeError:
        pass
    return backend.is_symbolic(obj)
def list_symbols(obj):
    """Return the symbol set of `obj`, delegating to the backend for foreign types."""
    try:
        return obj.symbols()
    except AttributeError:
        pass
    return backend.list_symbols(obj)
def projection_matrix(indices: List[int], dimension: int):
    """Build a sparse selector matrix P with P[row, indices[row]] == 1."""
    matrix = dok_matrix((len(indices), dimension), dtype=float)
    for row, column in enumerate(indices):
        matrix[row, column] = 1
    return matrix
# Registry of known operators, keyed by arity (None = variadic).
__ops = defaultdict(list)
# Maps each registered operator to its output-shape inference function.
__shape_ops = {}
# Canonical shape of a scalar quantity.
scalar_shape = (1, )
def infer_scalar_shape(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
    """Infer the result shape of an elementwise op.

    All shapes must agree, except that scalars broadcast to any shape.
    Raises AttributeError on incompatible shapes.
    """
    result = shapes[0]
    for candidate in shapes[1:]:
        if candidate == result or candidate == scalar_shape:
            continue
        if result == (1, ):
            # a scalar result broadcasts up to the first non-scalar shape
            result = candidate
        else:
            raise AttributeError('Invalid Shape')
    return result
def matmul_shape(*shapes: Tuple[int, ...]) -> Tuple[int, ...]:
    """Chain matrix-product shapes: ((n,m),(m,k),...) -> (n, last k).

    Raises AttributeError when adjacent inner dimensions do not match.
    """
    rows, cols = shapes[0]
    for next_rows, next_cols in shapes[1:]:
        if cols != next_rows:
            raise AttributeError('Invalid shape')
        cols = next_cols
    return rows, cols
def transpose_shape(shape: Tuple[int, int]) -> Tuple[int, ...]:
    """Swap the two axes of a matrix shape."""
    return shape[1], shape[0]
def infer_shape(op: Callable, *shapes: Tuple[int, ...]) -> Tuple[int, ...]:
    """Infers the output shape from the operation on the given inputs."""
    shape_rule = __shape_ops[op]
    return shape_rule(*shapes)
def register_op(shape_func=infer_scalar_shape):
    """Decorator factory registering a function as an expression-graph op.

    The op is filed under its arity (None when it takes *args/**kwargs),
    and `shape_func` is recorded as its shape-inference rule.
    """
    def wrapper(func):
        params = signature(func).parameters.values()
        variadic = any(
            p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD) for p in params
        )
        arity = None if variadic else len(params)
        __ops[arity].append(func)
        __shape_ops[func] = shape_func
        return func
    return wrapper
def wrap_as_op(func: Callable,
               arguments: Optional[int] = None,
               shape_func=infer_scalar_shape) -> Callable:
    """Wraps the function for use in expression graphs.

    Args:
        func: A function to wrap
        arguments: The number of arguments (None for variadic)
        shape_func: A function which generates the output shape from the
            arguments.

    Returns:
        A callable operator for use in an expression graph.
    """
    # register first, then hand back a factory that defers evaluation
    __ops[arguments].append(func)
    __shape_ops[func] = shape_func

    def op_factory(*args):
        return ExpressionGraph(func, *args)

    return op_factory
@register_op()
def power(base, exponent):
    """Exponentiation op: base ** exponent."""
    return base ** exponent
@register_op()
def add(lhs, rhs):
    """Addition op."""
    return lhs + rhs
@register_op()
def sub(lhs, rhs):
    """Subtraction op."""
    return lhs - rhs
@register_op(matmul_shape)
def matmul(lhs, rhs):
    """Matrix-product op; shape follows matmul_shape."""
    return lhs @ rhs
@register_op()
def neg(obj):
    """Unary negation op."""
    return -obj
@register_op()
def mul(lhs, rhs):
    """Multiplication op."""
    return lhs * rhs
@register_op()
def div(lhs, rhs):
    """True-division op."""
    return lhs / rhs
@register_op(transpose_shape)
def transpose(matrix):
    """Matrix-transpose op; shape follows transpose_shape."""
    return matrix.T
class LessThanOrEqualTo:
    """Inequality expression asserting `smaller` <= `bigger`."""

    def __init__(self, smaller, bigger):
        self.smaller = smaller
        self.bigger = bigger

    def __str__(self):
        return f'{self.smaller} <= {self.bigger}'

    def symbols(self):
        """Union of both sides' symbol sets; non-symbolic sides contribute nothing."""
        collected = set()
        for side in (self.smaller, self.bigger):
            try:
                collected |= side.symbols()
            except AttributeError:
                pass
        return collected
class Algebraic(metaclass=ABCMeta):
    """Base class for symbolic terms in expression graphs.

    Arithmetic dunders do not evaluate; they build ExpressionGraph nodes.
    Comparison dunders build LessThanOrEqualTo inequality expressions.
    """
    @property
    @abstractmethod
    def shape(self):
        # shape tuple of this term; scalars use (1, )
        raise NotImplementedError
    @abstractmethod
    def symbols(self):
        # set of free symbols appearing in this term
        raise NotImplementedError
    @abstractmethod
    def __hash__(self):
        raise NotImplementedError
    def __add__(self, other):
        return ExpressionGraph(add, self, other)
    def __radd__(self, other):
        return ExpressionGraph(add, other, self)
    def __neg__(self):
        return ExpressionGraph(neg, self)
    def __sub__(self, other):
        return ExpressionGraph(sub, self, other)
    def __rsub__(self, other):
        return ExpressionGraph(sub, other, self)
    def __matmul__(self, other):
        return ExpressionGraph(matmul, self, other)
    def __rmatmul__(self, other):
        return ExpressionGraph(matmul, other, self)
    def __mul__(self, other):
        return ExpressionGraph(mul, self, other)
    def __rmul__(self, other):
        return ExpressionGraph(mul, other, self)
    def __truediv__(self, other):
        return ExpressionGraph(div, self, other)
    def __rtruediv__(self, other):
        return ExpressionGraph(div, other, self)
    def __le__(self, other):
        return LessThanOrEqualTo(self, other)
    def __ge__(self, other):
        return LessThanOrEqualTo(other, self)
    def __gt__(self, other):
        # strict inequality is encoded as non-strict with an epsilon margin
        return LessThanOrEqualTo(other, self + epsilon)
    def __lt__(self, other):
        return LessThanOrEqualTo(self, other + epsilon)
    def __cmp__(self, other):
        # NOTE(review): __cmp__ is ignored by Python 3; equality currently
        # falls back to identity via object.__eq__ — confirm this is intended.
        return id(self) == id(other)
def is_op(value):
    """True if `value` has been registered as an expression-graph operator."""
    for registered in __ops.values():
        if value in registered:
            return True
    return False
class ExpressionGraph(Algebraic):
    """Graph representation of a symbolic expression.

    `nodes` holds operators and atoms (numbers, variables, signals);
    `edges` maps the index of an operator node to the indices of its
    argument nodes; `head` is the index of the outermost operation.
    Note: the in-place arithmetic dunders mutate this graph and return it.
    """

    def __init__(self, op, *args):
        self.nodes = []
        op_node = self.add_or_get_node(op)
        self.edges = {}
        self.edges.update(
            {op_node: [self.add_or_get_node(a) for a in args]}
        )
        self.head = op_node

    @property
    def shape(self):
        return self._get_shape_of(self.head)

    def _get_shape_of(self, node):
        # operator node: infer the shape from the children's shapes
        if node in self.edges:
            op = self.nodes[node]
            shapes = [
                self._get_shape_of(child)
                for child in self.edges[node]
            ]
            return infer_shape(op, *shapes)
        obj = self.nodes[node]
        try:
            return obj.shape
        except AttributeError:
            if isinstance(obj, (float, int, complex)):
                return scalar_shape
            raise NotImplementedError(
                f'Don\'t know how to get the shape of {obj}'
            )

    def call(self, values):
        """Evaluate the graph bottom-up, substituting `values` for matching atoms."""
        def recurse(node):
            obj = self.nodes[node]
            if is_op(obj):
                args = [recurse(child) for child in self.edges[node]]
                return obj(*args)
            try:
                return values[obj]
            except (KeyError, TypeError):
                # atom not in the substitution map (or unhashable): keep as-is
                pass
            return obj
        return recurse(self.head)

    @property
    def is_symbolic(self):
        # bug fix: the old test `self.symbols() != {}` compared a set to an
        # empty *dict*, which is always True; test set emptiness instead.
        return len(self.symbols()) > 0

    def add_or_get_node(self, value):
        """Intern `value` into `nodes`, returning its index."""
        if value is self:
            assert self.head is not None
            return self.head
        if is_op(value):
            # operators are always appended, never deduplicated
            idx = len(self.nodes)
            self.nodes.append(value)
            return idx
        if isinstance(value, ExpressionGraph):
            return self.merge_and_return_subgraph_head(value)
        try:
            return self.nodes.index(value)
        except ValueError:
            pass
        # else: new atom
        idx = len(self.nodes)
        self.nodes.append(value)
        return idx

    def merge_and_return_subgraph_head(self, other):
        """Splice another graph's nodes/edges into this graph; return its head's new index."""
        new_indices = {
            old_idx: self.add_or_get_node(node)
            for old_idx, node in enumerate(other.nodes)
        }
        self.edges.update({
            new_indices[parent]: [new_indices[child] for child in children]
            for parent, children in other.edges.items()
        })
        return new_indices[other.head]

    def push_op(self, op, *nodes):
        """Mutate this graph in place, making `op(*nodes)` the new head."""
        op_node = self.add_or_get_node(op)
        node_indices = [self.add_or_get_node(node) for node in nodes]
        self.edges[op_node] = node_indices
        self.head = op_node
        return self

    def __add__(self, other):
        return self.push_op(add, self, other)

    def __radd__(self, other):
        return self.push_op(add, other, self)

    def __neg__(self):
        return self.push_op(neg, self)

    def __sub__(self, other):
        return self.push_op(sub, self, other)

    def __rsub__(self, other):
        return self.push_op(sub, other, self)

    def __mul__(self, other):
        return self.push_op(mul, self, other)

    def __rmul__(self, other):
        return self.push_op(mul, other, self)

    def __truediv__(self, other):
        return self.push_op(div, self, other)

    def __rtruediv__(self, other):
        return self.push_op(div, other, self)

    def __matmul__(self, other):
        return self.push_op(matmul, self, other)

    def __rmatmul__(self, other):
        return self.push_op(matmul, other, self)

    def __pow__(self, exponent, modulo=None):
        return self.push_op(power, self, exponent)

    def __hash__(self):
        # bug fix: dicts and lists are unhashable, so hashing self.edges
        # directly raised TypeError; hash a frozenset of (parent, children)
        # pairs instead.
        edge_key = frozenset(
            (parent, tuple(children))
            for parent, children in self.edges.items()
        )
        return hash((edge_key, *[hash(n) for n in self.nodes]))

    def symbols(self):
        """Set of free symbols; time is excluded beneath evaluate_signal nodes."""
        def recurse(node):
            obj = self.nodes[node]
            if not is_op(obj):
                try:
                    return obj.symbols()
                except AttributeError:
                    return set()
            child_symbols = set.union(
                *(recurse(child)
                  for child in self.edges[node])
            )
            if obj is evaluate_signal:
                # evaluating a signal at a fixed time removes the time symbol
                return child_symbols - {get_time_variable()}
            else:
                return child_symbols
        return recurse(self.head)
class Variable(Algebraic):
    """A free symbolic variable with an optional name and shape."""
    # Variables are always symbolic by definition.
    is_symbolic = True

    def __init__(self, name=None, shape=scalar_shape):
        self._shape = shape
        self.name = name

    @property
    def shape(self):
        return self._shape

    def symbols(self):
        """A variable's symbol set is just itself."""
        return {self}

    def __hash__(self):
        # identity-based hashing: distinct Variable objects never collide
        return hash(id(self))

    def __cmp__(self, other):
        return self is other
# The shared independent time variable, exposed via get_time_variable().
_t = Variable('t')
class Parameter(Algebraic):
    """Symbolic type for variables bound to a block parameter.
    Args:
        block: The model block from which to derive the symbolic parameter.
        parameter: Index or name of the desired symbolic parameter.
    """
    # Interning table: one Parameter instance per (id(block), index) pair.
    _table = {}
    def __new__(cls, block, parameter: Union[str, int]):
        # accept either a parameter name or a numeric index
        if isinstance(parameter, str):
            index = find_param_index_by_name(block, parameter)
        else:
            index = parameter
        assert 0 <= index < len(block.parameters),\
            f'Invalid parameter index for {block}: got {parameter},'\
            f'expected a number between 0 and {len(block.parameters)}'
        uid = (id(block), index)
        # return the cached instance when this parameter was created before
        try:
            return Parameter._table[uid]
        except KeyError:
            pass
        obj = Algebraic.__new__(cls)
        obj.__init__()
        setattr(obj, 'uid', uid)
        setattr(obj, 'index', index)
        # weak reference so the Parameter does not keep the block alive
        setattr(obj, '_parent', weakref.ref(block))
        # NOTE(review): entries are never evicted, so _table grows for the
        # lifetime of the process — confirm this is acceptable.
        Parameter._table[uid] = obj
        return obj
    def __hash__(self):
        return hash(self.uid)
    def __cmp__(self, other):
        # NOTE(review): __cmp__ is ignored by Python 3 — see Algebraic.
        try:
            return self.uid == other.uid
        except AttributeError:
            return False
    def get_source_and_slice(self):
        # (owning block, slice selecting this parameter within the block)
        return self._parent(), slice(self.index, self.index + 1, None)
    @property
    def name(self):
        return self._parent().parameters[self.index]
    @property
    def shape(self):
        return scalar_shape
    def symbols(self):
        return {self}
@register_op()
def evaluate_signal(signal, t):
    """Op node: evaluate a time-varying signal at time `t`."""
    return signal(t)
class SignalReference(Algebraic):
    """Symbolic variable representing a time varying signal.
    Args:
        port: The model port from which this signal is derived.
    """
    # Interning table of weak references, keyed by id(port).
    _signals = {}
    def __init__(self, port):
        # NOTE: __init__ runs even when __new__ returned a cached instance;
        # it just re-assigns the same port.
        self.port = port
    def __new__(cls, port):
        source_id = id(port)
        # reuse the live instance for this port, if one still exists
        try:
            new_signal = SignalReference._signals[source_id]()
            assert new_signal is not None
            return new_signal
        except (KeyError, AssertionError):
            pass
        new_signal = super().__new__(cls)
        SignalReference._signals[source_id] = weakref.ref(new_signal)
        return new_signal
    @property
    def t(self):
        # the shared time variable
        return get_time_variable()
    @property
    def shape(self):
        return len(self.port),
    def __hash__(self):
        return hash(self.port)
    def __cmp__(self, other):
        # NOTE(review): __cmp__ is ignored by Python 3 — see Algebraic.
        try:
            return self.port is other.port
        except AttributeError:
            return False
    def __call__(self, t):
        # symbolic evaluation of this signal at time t
        return ExpressionGraph(evaluate_signal, self, t)
    def symbols(self):
        return {self, self.t}
def as_vector(arg):
    """Coerce `arg` to a vector-like object.

    Sequences (anything with len()) are returned unchanged, scalar ints and
    floats become 1-tuples, and symbolic objects are cast via the backend.

    Raises:
        NotImplementedError: if `arg` cannot be vectorised.
    """
    try:
        len(arg)
        return arg
    except TypeError:
        pass
    if isinstance(arg, (int, float)):
        return arg,
    if is_symbolic(arg):
        return backend.cast(arg)
    # bug fix: message previously read "Don't know to to vectorise"
    raise NotImplementedError(
        f'Don\'t know how to vectorise {arg.__class__}'
    )
def get_time_variable():
    """Return the shared independent time variable `t`."""
    return _t
def _is_subtree_constant(graph, node):
    """True when the subtree rooted at `node` has no free time dependence."""
    obj = graph.nodes[node]
    if is_op(obj):
        if obj is evaluate_signal:
            # a signal evaluated at an explicit time is constant w.r.t. t
            return True
        return all(
            _is_subtree_constant(graph, child) for child in graph.edges[node]
        )
    return not is_temporal(obj)
def is_temporal(symbol):
    """True if `symbol` depends on time."""
    if isinstance(symbol, ExpressionGraph):
        return not _is_subtree_constant(symbol, symbol.head)
    if isinstance(symbol, SignalReference):
        return True
    # ops and ordinary atoms are not temporal; only the time variable is
    return symbol is get_time_variable()
def is_matrix(obj):
    """True for dense numpy arrays and scipy sparse matrices."""
    return isinstance(obj, np.ndarray) or isinstance(obj, spmatrix)
def lambdify(graph: ExpressionGraph,
arguments: List[Union[Algebraic, List[Algebraic]]],
name: str = 'f'
):
substitutions = {}
for i, arg in enumerate(arguments):
if isinstance(arg, list):
assert all(sub_arg.shape == scalar_shape for sub_arg in arg), \
'Invalid arguments, lists | |
and self.last_point else None
@property
def xdelta_direct(self):
    """direct x-distance between end points

    the sign is only meaningful for directed edges
    :return: distance in pixels, or None if either end coordinate is missing
    """
    xrange = self.xrange_direct
    # bug fix: a coordinate of 0 is valid but falsy, so the old truthiness
    # test wrongly returned None; only None means "missing"
    if not xrange or xrange[0] is None or xrange[1] is None:
        return None
    return xrange[1] - xrange[0]
@property
def ydelta_direct(self):
    """direct y-distance between end points

    the sign is only meaningful for directed edges
    :return: distance in pixels, or None if either end coordinate is missing
    """
    yrange = self.yrange_direct
    # bug fix: a coordinate of 0 is valid but falsy, so the old truthiness
    # test wrongly returned None; only None means "missing"
    if not yrange or yrange[0] is None or yrange[1] is None:
        return None
    return yrange[1] - yrange[0]
# class AmiEdge:
def create_line_segments(self, tolerance=1):
    """approximate this edge's sknw points with a Douglas-Peucker polyline
    :param tolerance: tolerance in pixels
    :return: numpy array of approximated polyline points
    """
    points = self.nx_graph[self.start_id][self.end_id][self.branch_id][AmiEdge.PTS]
    points2 = approximate_polygon(points, tolerance=tolerance)
    return points2
def find_single_line(self, tol=1) -> AmiLine:
    """segments the edge into straight lines (AmiLine)
    If segmentation gives a single line (of any orientation) returns it
    else None
    :param tol: max deviation of points from segments , def = 1
    :return: AmiLine or None.
    """
    segments = self.get_segments(tol)
    if len(segments) == 1:
        return segments[0]
    return None
@classmethod
def get_single_lines(cls, edges) -> list:
    """extracts single line from any edges which have one
    :return: list of AmiLines from edges which have exactly one"""
    candidates = (edge.find_single_line() for edge in edges)
    return [line for line in candidates if line is not None]
def get_axial_lines(self, tolerance=1) -> list:
    """segments the edge into straight lines parallel to axes (AmiLine)
    If All segments are aligned with axes, returns that list else None
    :param tolerance: max deviation of points from segments
    :return: list of AmiLines or None.
    """
    segments = self.get_segments(tolerance=tolerance)  # maybe cache this
    if len(segments) > 1:
        logger.debug(f"segments {len(segments)} ... {self}")
    # every adjacent pair must form a H-V or V-H corner for all segments
    # to be axial
    corners = self._get_axial_corners(segments, tolerance=tolerance)
    if len(corners) == len(segments) - 1:
        # NOTE(review): a single segment passes trivially (0 corners needed)
        # even when it is not axis-parallel — confirm this is intended.
        return segments
    return None
@classmethod
def get_axial_polylines(cls, edges, tolerance=1) -> list:
    """extracts axial polylines from any edges which consist of 2 or more axial lines
    :return: list of polylines (lists of AmiLines) from edges which have 2 or more"""
    polylines = []
    for edge in edges:
        axial_lines = edge.get_axial_lines(tolerance=tolerance)
        if axial_lines is not None and len(axial_lines) > 1:
            polylines.append(axial_lines)
    return polylines
# class AmiEdge:
def plot_edge(self, pts, plot_region, edge_id=None, boxcolor=None):
    """
    plot this edge's points (and optionally its bounding box)
    :param edge_id: selects the line colour (cycled); None uses the first
    :param pts: points in nx_graph format
    :param plot_region:
    :param boxcolor: if not None plot edge box in this colour
    :return:
    """
    colors = ["green", "blue", "magenta", "cyan"]
    if boxcolor is not None:
        bbox = self.get_or_create_bbox()
        AmiGraph.add_bbox_rect(plot_region, bbox, linewidth=1, edgecolor=boxcolor, facecolor="none")
    edgecolor = colors[0] if edge_id is None else colors[edge_id % len(colors)]
    # sknw stores (y, x), so column 1 is plotted as x and column 0 as y
    plt.plot(pts[:, 1], pts[:, 0], edgecolor)
# class AmiEdge:
def get_segments(self, tolerance=1):
    """extracts line segments by skimage.measure.approximate_polygon() (Douglas-Peucker)
    :param tolerance: max deviation of curve from line segments
    :return: list of line segments as AmiLines
    """
    coords = np.array(self.get_coords())
    self._get_or_create_segment_points(coords, tolerance)
    # pair consecutive approximated points into AmiLine segments
    self.segments = [
        AmiLine([start, end])
        for start, end in zip(self.line_points[:-1], self.line_points[1:])
    ]
    return self.segments
def _get_or_create_segment_points(self, points_array, tolerance):
    """calculate segments with approximate_polygon()
    recalculate if tolerance changes"""
    needs_refresh = self.line_points is None or tolerance != self.tolerance
    if needs_refresh:
        self.line_points = approximate_polygon(points_array, tolerance)
        # remember the tolerance so a repeat call can reuse the cache
        self.tolerance = tolerance
    return self.line_points
def get_single_segment(self, segments=None, tolerance=1):
    """get edge as a single segment
    :return: a single segment if D-P finds it within self.tolerance, else None
    """
    if segments is None:
        segments = self.get_segments(tolerance=tolerance)
    return segments[0] if len(segments) == 1 else None
def is_horizontal(self, tolerance=1):
    """True when this edge reduces to one horizontal segment."""
    single = self.get_single_segment(tolerance=tolerance)
    if single is None:
        return False
    return single.is_horizontal(tolerance=tolerance)
def is_vertical(self, tolerance=1):
    """True when this edge reduces to one vertical segment."""
    single = self.get_single_segment(tolerance=tolerance)
    if single is None:
        return False
    return single.is_vertical(tolerance=tolerance)
# class AmiEdge:
@classmethod
def _get_axial_corners(cls, segments, tolerance):
    """
    finds Hor-Vert and Vert-Hor corners in adjacent segment pairs
    :param tolerance:
    :return: list of corners (last_segment, next_segment)
    """
    corners = []
    for previous, current in zip(segments, segments[1:]):
        prev_vert = previous.is_vertical(tolerance=tolerance)
        cur_horiz = current.is_horizontal(tolerance=tolerance)
        prev_horiz = previous.is_horizontal(tolerance=tolerance)
        cur_vert = current.is_vertical(tolerance=tolerance)
        if (prev_vert and cur_horiz) or (cur_vert and prev_horiz):
            corners.append((previous, current))
    return corners
@classmethod
def get_vertical_edges(cls, ami_edges, tolerance=1):
    """Return only the edges that are single vertical lines."""
    return [edge for edge in ami_edges if edge.is_vertical(tolerance=tolerance)]
@classmethod
def get_horizontal_edges(cls, ami_edges, tolerance=1):
    """Return only the edges that are single horizontal lines."""
    return [edge for edge in ami_edges if edge.is_horizontal(tolerance=tolerance)]
@classmethod
def get_non_axial_edges(cls, ami_edges, tolerance=1):
    """edges other than horizontal or vertical
    can be slanted straight lines or curves
    :param ami_edges:
    :param tolerance: default 1"""
    non_axial = []
    for edge in ami_edges:
        # short-circuit preserved: vertical test skipped for horizontals
        if not edge.is_horizontal(tolerance=tolerance) and not edge.is_vertical(tolerance=tolerance):
            non_axial.append(edge)
    return non_axial
@classmethod
def get_vertical_lines(cls, ami_edges, tolerance=2):
    """
    get vertical lines from edges
    :param ami_edges: edges
    :param tolerance:
    :return: lines or empty list
    """
    return AmiEdge.get_single_lines(
        AmiEdge.get_vertical_edges(ami_edges, tolerance=tolerance))
@classmethod
def get_horizontal_lines(cls, ami_edges, tolerance=2):
    """
    get horizontal lines from edges
    :param ami_edges: edges
    :param tolerance:
    :return: lines or empty list
    """
    # consistency fix: the first parameter of a classmethod is `cls`
    # (this method mirrored get_vertical_lines but named it `self`)
    horizontal_edges = AmiEdge.get_horizontal_edges(ami_edges, tolerance=tolerance)
    horiz_ami_lines = AmiEdge.get_single_lines(horizontal_edges)
    return horiz_ami_lines
def end_point(self, endx):
    """select end 0 (first) or 1 (last)
    :param endx: 0 or 1
    :return: point [x,y] or None
    """
    # bug fix: the old guard `not endx` rejected the legal value 0, so the
    # first end could never be selected; validate membership explicitly
    if endx not in (0, 1) or not self.points_xy:
        return None
    return self.first_point if endx == 0 else self.last_point
def is_cyclic(self):
    """start and end are identical and length > 1"""
    # bug fix: a start node id of 0 is valid but falsy; compare to None so
    # the method works for node 0 and always returns a bool
    return len(self.points_xy) > 1 and self.start_id is not None \
        and self.start_id == self.end_id
# =========================================
@classmethod
def plot_all_lines(cls, nx_graph, lines, tolerance):
    """
    plots edges as lines
    compare with above, maybe merge
    :param nx_graph:
    :param lines: list of edge-id pairs to plot
    :param tolerance: Douglas-Peucker tolerance in pixels
    :return:
    """
    assert type(lines) is list, f"lines should be list {lines}"
    # left panel: raw points; right panel: D-P approximation
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(9, 4))
    ax1.set_aspect('equal')
    ax2.set_aspect('equal')
    for line in lines:
        for i, j in line:
            assert type(i) is int, f"i should be int {type(i)}"
            assert type(j) is int, f"j should be int {type(j)}"
            AmiEdge.douglas_peucker_plot_line(nx_graph, i, j, tolerance, ax1, ax2)
    plt.show()
@classmethod
def douglas_peucker_plot_line(cls, nx_graph, i, j, tolerance, ax1, ax2):
    """this may be replaced by create_ami_lines()"""
    points = nx_graph[i][j][AmiEdge.PTS]
    # original wiggly line
    # x and y are reversed in sknw
    ax1.plot(points[:, 1], -points[:, 0])  # negative since down the page
    points2 = approximate_polygon(points, tolerance=tolerance)
    # the line is not directed so find which end fits which node is best:
    # measure both orientations without moving, then snap the better one
    distij = cls.move_line_ends_to_closest_node(nx_graph, (i, j), points2, move=False)
    distji = cls.move_line_ends_to_closest_node(nx_graph, (j, i), points2, move=False)
    ij = (i, j) if distij < distji else (j, i)
    AmiEdge.move_line_ends_to_closest_node(nx_graph, ij, points2, move=True)
    ax2.plot(points2[:, 1], -points2[:, 0])
# class AmiEdge:
@classmethod
def move_line_ends_to_closest_node(cls, nx_graph, ij, points, move=False):
    """Snap or measure a polyline's ends against two node centroids.

    :param ij: (start_node_id, end_node_id)
    :param move: when True, overwrite the end points in place and return None;
        when False, return the summed end-to-centroid distance
    """
    start_centroid = nx_graph.nodes[ij[0]][AmiNode.CENTROID]
    end_centroid = nx_graph.nodes[ij[1]][AmiNode.CENTROID]
    if move:
        points[0] = start_centroid
        points[-1] = end_centroid
        return None
    return math.dist(points[0], start_centroid) + math.dist(points[-1], end_centroid)
@classmethod
def create_normalized_edge_id_tuple(cls, node_id1, node_id2, branch_id):
    """ensures that node_id1 <= node_id2
    This counts each edge only once"""
    low, high = (node_id1, node_id2) if node_id1 <= node_id2 else (node_id2, node_id1)
    if branch_id is None:
        return low, high
    return low, high, branch_id
@classmethod
def is_normalized_edge(cls, start_id, end_id):
    """requires not None and start_id <= end_id
    :param start_id:
    :param end_id:
    """
    if start_id is None or end_id is None:
        return False
    return start_id <= end_id
def get_coords(self):
    """Return the edge's raw points (stored in points_xy)."""
    return self.points_xy
@classmethod
def get_common_node_id(cls, id0, id1):
    """find node common to two edges
    (ignores multiple edges at present)
    (1,2,0 and (2,3,0) have 2 in common
    (1,2,0) and (3,4,0) have None in common
    (1,2,0) and (1,2,1) will return 1 or 2 at random
    :param id0: first edge (could be duple or triple)
    :param id1: as for first edge
    :return: common node id or None
    """
    if len(id0) < 2 or len(id1) < 2:
        # bug fix: the message was missing its f-prefix, so the literal
        # text "{id0} {id1}" appeared instead of the actual ids
        raise ValueError(f"bad ids {id0} {id1}")
    if id0[0] == id1[0] or id0[0] == id1[1]:
        return id0[0]
    if id0[1] == id1[0] or id0[1] == id1[1]:
        return id0[1]
    return None
"""a wrapper for an sknw/nx node, still being developed"""
"""
==========================================================================
===============================NODE=======================================
==========================================================================
"""
class AmiNode:
"""Node holds coordinates
["o"] for centrois (AmiNode.CENTROID)
["pts"] for multiple points (AmiNode.POINTS)
ALL COORDINATES COMMUNICATED BY/TO USER ARE X,Y
(SKNW uses y,x coordinates)
"""
CENTROID = "o"
NODE_PTS = "pts"
NEXT_ANG = "next_angle"
PIXLEN = "pixlen"
REMOTE = "remote"
def __init__(self, node_id, ami_graph=None, nx_graph=None, _private=False):
"""
:param node_id: mandatory
:param ami_graph: will use ami_graph.nx_graph
:param nx_graph: else will use nx_graph
"""
if not _private:
raise ValueError("Do not call AmiNode directly; ise ami_graph.get_or_create_node*() factories")
| |
json files)
files_sounds = os.listdir('./sounds/')
files_analysis = os.listdir('./analysis/')
files_baskets = os.listdir('./baskets/')
files_baskets_pickle = os.listdir('./baskets_pickle/')
files_analysis_stats = os.listdir('./analysis_stats/')
settings = SettingsSingleton()
settings.local_sounds = []
settings.local_analysis = []
settings.local_baskets = []
settings.local_baskets_pickle = []
settings.local_analysis_stats = []
for i in files_sounds:
settings.local_sounds.append(int(i[:-5]))
for j in files_analysis:
settings.local_analysis.append(int(j[:-5]))
for m in files_baskets:
settings.local_baskets.append(m[:-5])
for n in files_baskets_pickle:
settings.local_baskets_pickle.append(n)
for k in files_analysis_stats:
settings.local_analysis_stats.append(int(k[:-5]))
settings.local_sounds.sort()
settings.local_analysis.sort()
settings.local_analysis_stats.sort()
def _init_oauth(self):
try:
import api_key
reload(api_key)
client_id = api_key.client_id
token = api_key.token
refresh_oauth = api_key.refresh_oauth
print ' Authenticating:\n'
req = 'curl -X POST -d "client_id=' + client_id + '&client_secret=' + token + \
'&grant_type=refresh_token&refresh_token=' + refresh_oauth + '" ' + \
'"https://www.freesound.org/apiv2/oauth2/access_token/"'
output = subprocess.check_output(req, shell=True)
output = ast.literal_eval(output)
access_oauth = output['access_token']
refresh_oauth = output['refresh_token']
self._write_api_key(client_id, token, access_oauth, refresh_oauth)
self.token = token
self.client_id = client_id
self.access_oauth = access_oauth
except ImportError:
client_id = raw_input('Enter your client id: ')
token = raw_input('Enter your api key: ')
code = raw_input('Please go to: https://www.freesound.org/apiv2/oauth2/authorize/?client_id=' + client_id + \
'&response_type=code&state=xyz and enter the ginve code: ')
print '\n Authenticating:\n'
req = 'curl -X POST -d "client_id=' + client_id + '&client_secret=' + token + \
'&grant_type=authorization_code&code=' + code + '" ' + \
'"https://www.freesound.org/apiv2/oauth2/access_token/"'
output = subprocess.check_output(req, shell=True)
output = ast.literal_eval(output)
access_oauth = output['access_token']
refresh_oauth = output['refresh_token']
self._write_api_key(client_id, token, access_oauth, refresh_oauth)
self.token = token
self.client_id = client_id
self.access_oauth = access_oauth
except:
print 'Could not authenticate'
return
self._set_oauth()
print '\n Congrats ! Your are now authenticated \n'
print freesound_rocks_ascii_art
@staticmethod
def _write_api_key(client_id, token, access_oauth, refresh_oauth):
    """Persist API credentials to api_key.py (overwrites any existing file)."""
    # `with` guarantees the handle is closed even if a write fails
    # (the old code leaked the handle on error)
    with open('api_key.py', 'w') as key_file:
        key_file.write('client_id = "' + client_id + '"')
        key_file.write('\n')
        key_file.write('token = "' + token + '"')
        key_file.write('\n')
        key_file.write('access_oauth = "' + access_oauth + '"')
        key_file.write('\n')
        key_file.write('refresh_oauth = "' + refresh_oauth + '"')
def _set_oauth(self):
    """Activate the stored OAuth2 access token on this client."""
    self.set_token(self.access_oauth, auth_type='oauth')
def _set_token(self):
    """Activate the plain API token on this client."""
    self.set_token(self.token)
#_________________________________________________________________#
# Analysis class #
#_________________________________________________________________#
class Analysis():
    """
    Analysis nested object. Holds all the analysis of many sounds

    Keys of the source JSON become attributes; nested dicts become nested
    Analysis objects, so descriptors are reachable as dotted paths.
    """
    def __init__(self, json_dict = None):
        # fall back to the bundled template when no data is supplied
        if not json_dict:
            with open('analysis_template.json') as infile:
                json_dict = simplejson.load(infile)
        self.json_dict = json_dict
        def replace_dashes(d):
            # descriptor names contain dashes, which are not valid
            # Python attribute names; rewrite them to underscores in place
            for k, v in d.items():
                if "-" in k:
                    d[k.replace("-", "_")] = d[k]
                    del d[k]
                if isinstance(v, dict): replace_dashes(v)
        replace_dashes(json_dict)
        # expose every key as an attribute; wrap nested dicts recursively
        self.__dict__.update(json_dict)
        for k, v in json_dict.items():
            if isinstance(v, dict):
                self.__dict__[k] = Analysis(v)
    def rsetattr(self, attr, val):
        # setattr supporting dotted paths, e.g. 'lowlevel.mfcc'
        pre, _, post = attr.rpartition('.')
        return setattr(self.rgetattr(pre) if pre else self, post, val)
    # sentinel distinguishing "no default given" from a default of None
    sentinel = object()
    def rgetattr(self, attr, default=sentinel):
        # getattr supporting dotted paths, with an optional default
        if default is self.sentinel:
            _getattr = getattr
        else:
            def _getattr(obj, name):
                return getattr(obj, name, default)
        return reduce(_getattr, [self] + attr.split('.'))
    def remove(self, index, descriptor):
        # drop one frame of a descriptor, or all frames if index == 'all'
        if index == 'all':
            self.rsetattr(descriptor, [])
        else:
            analysis = self.rgetattr(descriptor)
            del analysis[index]
#_________________________________________________________________#
# Basket class #
#_________________________________________________________________#
class Basket:
"""
A basket where sounds and analysis can be loaded
>>> c = manager.Client()
>>> b = c.new_basket()
TODO : add comments attribute, title...
"""
def __init__(self, client):
    """Create an empty basket attached to `client`."""
    self.sounds = []
    # nested Analysis object holding per-frame descriptors for all sounds
    self.analysis = Analysis()  # the use of the nested object is not rly good...
    self.analysis_stats = []
    self.analysis_stats_names = []
    self.ids = []
    self.analysis_names = []
    self.parent_client = client
    # make sure any pre-loaded sounds point at this basket's client
    self._update_sound_client()
def __add__(self, other):
    """
    Concatenate two baskets
    TODO : adapt it to new changes & make sure the order is not broken
    NOTE(review): other's analysis_stats and analysis frames are NOT
    copied, so stats can fall out of alignment with ids — confirm.
    """
    sumBasket = copy.deepcopy(self)
    for i in range(len(other.sounds)):
        sumBasket.ids.append(other.ids[i])
        sumBasket.sounds.append(other.sounds[i])
    sumBasket._remove_duplicate()
    return sumBasket
def __sub__(self, other):
    """
    Return a basket with elements of self that are not in other
    """
    subBasket = copy.deepcopy(self)
    # set membership avoids an O(n*m) scan over other.ids
    other_ids = set(other.ids)
    idx_to_remove = [i for i, sound_id in enumerate(self.ids) if sound_id in other_ids]
    subBasket.remove(idx_to_remove)
    return subBasket
def __len__(self):
    """Number of sounds (ids) in the basket."""
    return len(self.ids)
def _actualize(self): # used when an old basket is loaded from pickle
    # baskets pickled before analysis_stats existed lack the attribute
    if not hasattr(self, 'analysis_stats'):
        self.analysis_stats = []
def _update_sound_client(self):
    """Re-point every loaded sound at this basket's parent client."""
    # the index produced by the old enumerate() was never used
    for sound in self.sounds:
        if sound is not None:
            sound.client = self.parent_client
def _remove_duplicate(self):
    """Drop duplicate ids (keeping first occurrences), then reload analysis."""
    # TODO : add method to concatenate analysis in Analysis() (won't have to reload json...)
    previous_ids = self.ids
    previous_sounds = self.sounds
    self.ids = []
    self.sounds = []
    for sound_id, sound in zip(previous_ids, previous_sounds):
        if sound_id not in self.ids:
            self.ids.append(sound_id)
            self.sounds.append(sound)
    self.update_analysis()
#________________________________________________________________________#
# __________________________ Users functions ____________________________#
def push(self, sound, analysis_stat=None):
    """Append one sound (possibly None) and its stats to the basket.
    >>> sound = c.my_get_sound(query='wind')
    >>> b.push(sound)
    """
    self.sounds.append(sound)
    self.analysis_stats.append(analysis_stat)
    self.ids.append(sound.id if sound is not None else None)
def push_list_id(self, sounds_id):
    """Fetch each sound id through the parent client and push it."""
    Bar = ProgressBar(len(sounds_id), LENGTH_BAR, 'Loading sounds')
    Bar.update(0)
    for idx, id in enumerate(sounds_id):
        sound = self.parent_client.my_get_sound(id)
        self.push(sound)
        Bar.update(idx+1)
def remove(self, index_list):
    """Delete the entries at the given indices from every parallel list."""
    # delete from the highest index first so earlier deletions do not
    # shift positions still pending removal
    index_list = sorted(index_list, reverse=True)
    for i in index_list:
        del self.ids[i]
        del self.sounds[i]
        try:
            del self.analysis_stats[i]
        except IndexError:
            # stats list may be shorter than ids if never fully loaded
            pass
        if hasattr(self, 'clas'):
            del self.clas[i]
        for descriptor in self.analysis_names:
            self.analysis.remove(i, descriptor)
def remove_sounds_with_no_analysis(self):
    """Drop every sound whose analysis_stats entry is None."""
    missing = [idx for idx, stats in enumerate(self.analysis_stats) if stats is None]
    self.remove(missing)
def update_sounds(self):
    """
    Use this method to load the sounds which ids are in the basket
    """
    progress = ProgressBar(len(self.ids), LENGTH_BAR, 'Loading sounds')
    progress.update(0)
    for position, sound_id in enumerate(self.ids):
        self.sounds.append(self.parent_client.my_get_sound(sound_id))
        progress.update(position + 1)
def add_analysis(self, descriptor):
    """
    Use this method to add the analysis.
    All the current loaded analysis will be erased
    All the analysis of the loaded sound ids will be loaded
    >>> results_pager = c.my_text_search(query='wind')
    >>> b.load_sounds(results_pager)
    >>> b.add_analysis('lowlevel.mfcc')
    """
    if descriptor in self.analysis_names:
        print 'The %s analysis are already loaded' % descriptor
    else:
        nbSound = len(self.ids)
        allFrames = []
        Bar = ProgressBar(nbSound,LENGTH_BAR, 'Loading ' + descriptor + ' analysis')
        Bar.update(0)
        for i in range(nbSound):
            allFrames.append(self.parent_client.my_get_analysis(self.ids[i], descriptor))
            Bar.update(i+1)
        # record the descriptor and store its frames on the nested Analysis
        self.analysis_names.append(descriptor)
        self.analysis.rsetattr(descriptor, allFrames)
    def update_analysis(self):
        """Fetch analysis frames for ids added since the descriptor was last loaded.

        For every loaded descriptor, compares the number of stored frame lists
        with the number of ids and fetches only the missing tail.
        """
        for nameAnalysis in self.analysis_names:
            # rgetattr returns the stored list itself; appending updates it in place
            allFrames = self.analysis.rgetattr(nameAnalysis)
            nbAnalysis = len(allFrames)
            nbAnalysisToLoad = len(self.ids) - nbAnalysis
            Bar = ProgressBar(nbAnalysisToLoad, LENGTH_BAR, 'Loading ' + nameAnalysis + ' analysis')
            Bar.update(0)
            for i in range(nbAnalysisToLoad):
                Bar.update(i + 1)
                allFrames.append(self.parent_client.my_get_analysis(self.ids[i+nbAnalysis], nameAnalysis))
def add_analysis_stats(self):
"""
Use this method to add all analysis stats to all sounds in the basket
(means and var of descriptors)
"""
#self.analysis_stats = []
nbSounds = len(self.sounds)
Bar = ProgressBar(nbSounds, LENGTH_BAR, 'Loading analysis stats')
Bar.update(0)
for i, sound in enumerate(self.sounds):
Bar.update(i + 1)
if sound is not None:
analysis = self.parent_client.my_get_analysis_stats(sound.id)
self.analysis_stats[i] = analysis
else:
self.analysis_stats[i] = None # HERE CHANGED APPEND TO I, is it ok ?
# try:
# self.analysis_stats.append(sound.get_analysis())
# except freesound.FreesoundException:
# pass
# FUNCTION FOR ADDING STATS OF ONLY ONE ANALYSIS
def add_one_analysis_stats(self, descriptor):
nbSounds = len(self.sounds)
Bar = ProgressBar(nbSounds, LENGTH_BAR, 'Loading analysis stats')
Bar.update(0)
for i, sound in enumerate(self.sounds):
Bar.update(i + 1)
if sound is not None:
analysis = self.parent_client.my_get_one_analysis_stats(sound.id, descriptor)
self.analysis_stats[i] = analysis
else:
self.analysis_stats[i] = None
def remove_analysis(self, descriptor):
if descriptor in self.analysis_names:
self.analysis.remove('all', descriptor)
self.analysis_names.remove(descriptor)
    def load_sounds_(self, results_pager, begin_idx=0, debugger=None):
        """Load sounds (with inline analysis when present) straight from a results pager.

        IN PROGRESS. Unlike the id-based loaders, this walks the pager itself and
        keeps requesting next pages until ``results_pager.count`` sounds have been
        pushed. Each page fetch is retried (1s apart) before giving up.

        :param results_pager: pager object exposing ``count`` and ``next_page()``
        :param begin_idx: offset added to the progress counter (resume support)
        :param debugger: optional list; every fetched pager is appended to it
        """
        nbSound = results_pager.count
        numSound = begin_idx # for iteration
        results_pager_last = results_pager
        Bar = ProgressBar(nbSound,LENGTH_BAR,'Loading sounds')
        Bar.update(0)
        # 1st iteration # maybe there is a better way to iterate through pages...
        for sound in results_pager:
            try:
                # some result objects carry their analysis inline
                self.push(sound, sound.analysis)
            except AttributeError:
                self.push(sound)
            numSound = numSound+1
            Bar.update(numSound+1)
        # next iteration
        while (numSound<nbSound):
            count = 0
            while 1: # care with this infinite loop...
                count += 1
                if count>10: # MAYBE SOME BUG HERE
                    print 'could not get more sounds'
                    break
                try:
                    results_pager = results_pager_last.next_page()
                    if debugger:
                        debugger.append(results_pager)
                    break
                except:
                    # NOTE(review): bare except retries on ANY error; consider narrowing
                    exc_info = sys.exc_info()
                    sleep(1)
                    print exc_info
            for sound in results_pager:
                try:
                    self.push(sound, sound.analysis)
                except AttributeError:
                    self.push(sound)
                numSound = numSound+1
                Bar.update(numSound+1)
            results_pager_last = results_pager
def extract_descriptor_stats(self, scale=False):
"""
Returns a list of the scaled and concatenated descriptor stats - mean and var (all the one that are loaded in the Basket) for all sounds in the Basket.
"""
feature_vector = []
for analysis_stats in self.analysis_stats:
feature_vector_single_sound = []
for k, v in analysis_stats.as_dict().iteritems():
if k == 'lowlevel':
for k_, v_ in v.iteritems():
try: # some lowlevel descriptors do not have 'mean' 'var' field (eg average_loudness)
# barkbands_kurtosis has 0 variance and that bring dvar and dvar2 to be None...
if isinstance(v_['mean'], list):
feature_vector_single_sound += v_['mean'] # take the mean
feature_vector_single_sound += v_['dmean']
feature_vector_single_sound += v_['dmean2']
feature_vector_single_sound += v_['var'] # var
feature_vector_single_sound += v_['dvar']
feature_vector_single_sound += v_['dvar2']
elif isinstance(v_['mean'], | |
<gh_stars>1-10
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
#
# the Free Software Foundation Inc.
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA
#
# or go online at: http://www.gnu.org/licenses/ to view license options.
#
# ##### END GPL LICENSE BLOCK #####
# authors: dudecon, jambay
# This module contains the UI definition, display,
# and processing (create mesh) functions.
# The routines to generate the vertices for the wall
# are found in the "Blocks" module.
import bpy
from bpy.types import Operator
from bpy.props import (
BoolProperty,
FloatProperty,
)
from .Blocks import (
NOTZERO, PI,
dims,
settings,
shelfSpecs,
stepSpecs,
createWall,
radialized,
slope,
openingSpecs,
bigBlock,
shelfExt,
stepMod,
stepLeft,
shelfBack,
stepOnly,
stepBack,
)
class add_mesh_wallb(Operator):
bl_idname = "mesh.wall_add"
bl_label = "Add a Masonry Wall"
bl_description = "Create a block (masonry) wall mesh"
bl_options = {'REGISTER', 'UNDO'}
# UI items - API for properties - User accessible variables...
# not all options are via UI, and some operations just don't work yet
# only create object when True
# False allows modifying several parameters without creating object
ConstructTog = BoolProperty(
name="Construct",
description="Generate the object",
default=True
)
# need to modify so radial makes a tower (normal);
# want "flat" setting to make disk (alternate)
# make the wall circular - if not sloped it's a flat disc
RadialTog = BoolProperty(
name="Radial",
description="Make masonry radial",
default=False
)
# curve the wall - if radial creates dome.
SlopeTog = BoolProperty(
name="Curved",
description="Make masonry sloped, or curved",
default=False
)
# need to review defaults and limits for all of these UI objects
# wall area/size
WallStart = FloatProperty(
name="Start",
description="Left side, or start angle",
default=-10.0,
min=-100, max=100.0
)
WallEnd = FloatProperty(
name="End",
description="Right side, or end angle",
default=10.0,
min=0.0, max=100.0
)
WallBottom = FloatProperty(
name="Bottom",
description="Lower height or radius",
default=0.0,
min=-100, max=100
)
WallTop = FloatProperty(
name="Top",
description="Upper height or radius",
default=15.0,
min=0.0, max=100.0
)
EdgeOffset = FloatProperty(
name="Edging",
description="Block staggering on wall sides",
default=0.6, min=0.0, max=100.0
)
# block sizing
Width = FloatProperty(
name="Width",
description="Average width of each block",
default=1.5,
min=0.01, max=100.0
)
WidthVariance = FloatProperty(
name="Variance",
description="Random variance of block width",
default=0.5,
min=0.0, max=100.0
)
WidthMinimum = FloatProperty(
name="Minimum",
description="Absolute minimum block width",
default=0.5,
min=0.01, max=100.0
)
Height = FloatProperty(
name="Height",
description="Average Height of each block",
default=0.7,
min=0.01, max=100.0
)
HeightVariance = FloatProperty(
name="Variance",
description="Random variance of block Height",
default=0.3,
min=0.0, max=100.0
)
HeightMinimum = FloatProperty(
name="Minimum",
description="Absolute minimum block Height",
default=0.25,
min=0.01, max=100.0
)
Depth = FloatProperty(
name="Depth",
description="Average Depth of each block",
default=2.0,
min=0.01, max=100.0
)
DepthVariance = FloatProperty(
name="Variance",
description="Random variance of block Depth",
default=0.1,
min=0.0, max=100.0
)
DepthMinimum = FloatProperty(
name="Minimum",
description="Absolute minimum block Depth",
default=1.0,
min=0.01, max=100.0
)
MergeBlock = BoolProperty(
name="Merge Blocks",
description="Make big blocks (merge closely adjoining blocks)",
default=False
)
# edging for blocks
Grout = FloatProperty(
name="Thickness",
description="Distance between blocks",
default=0.1,
min=-10.0, max=10.0
)
GroutVariance = FloatProperty(
name="Variance",
description="Random variance of block Grout",
default=0.03,
min=0.0, max=100.0
)
GroutDepth = FloatProperty(
name="Depth",
description="Grout Depth from the face of the blocks",
default=0.1,
min=0.0001, max=10.0
)
GroutDepthVariance = FloatProperty(
name="Variance",
description="Random variance of block Grout Depth",
default=0.03,
min=0.0, max=100.0
)
GroutEdge = BoolProperty(
name="Edging",
description="<NAME>",
default=False
)
# properties for openings
Opening1Tog = BoolProperty(
name="Opening(s)",
description="Make windows or doors",
default=True
)
Opening1Width = FloatProperty(
name="Width",
description="The Width of the first opening",
default=2.5,
min=0.01, max=100.0
)
Opening1Height = FloatProperty(
name="Height",
description="The Height of the first opening",
default=3.5,
min=0.01, max=100.0
)
Opening1X = FloatProperty(
name="Indent",
description="The x position or spacing of the first opening",
default=5.0,
min=-100, max=100.0
)
Opening1Z = FloatProperty(
name="Bottom",
description="The z position of the First opening",
default=5.0,
min=-100, max=100.0
)
Opening1Repeat = BoolProperty(
name="Repeat",
description="make multiple openings, with spacing X1",
default=False
)
Opening1TopArchTog = BoolProperty(
name="Top Arch",
description="Add an arch to the top of the first opening",
default=True
)
Opening1TopArch = FloatProperty(
name="Curve",
description="Height of the arch on the top of the opening",
default=2.5,
min=0.001, max=100.0
)
Opening1TopArchThickness = FloatProperty(
name="Thickness",
description="Thickness of the arch on the top of the opening",
default=0.75,
min=0.001, max=100.0
)
Opening1BtmArchTog = BoolProperty(
name="Bottom Arch",
description="Add an arch to the bottom of opening 1",
default=False
)
Opening1BtmArch = FloatProperty(
name="Curve",
description="Height of the arch on the bottom of the opening",
default=1.0,
min=0.01, max=100.0
)
Opening1BtmArchThickness = FloatProperty(
name="Thickness",
description="Thickness of the arch on the bottom of the opening",
default=0.5,
min=0.01, max=100.0
)
Opening1Bevel = FloatProperty(
name="Bevel",
description="Angle block face",
default=0.25,
min=-10.0, max=10.0
)
# openings on top of wall
CrenelTog = BoolProperty(
name="Crenels",
description="Make openings along top of wall",
default=False
)
CrenelXP = FloatProperty(
name="Width",
description="Gap width in wall based the percentage of wall width",
default=0.25,
min=0.10, max=1.0,
subtype="PERCENTAGE"
)
CrenelZP = FloatProperty(
name="Height",
description="Crenel Height as the percentage of wall height",
default=0.10,
min=0.10, max=1.0,
subtype="PERCENTAGE"
)
# narrow openings in wall.
# need to prevent overlap with arch openings - though inversion is an interesting effect.
SlotTog = BoolProperty(
name="Slots",
description="Make narrow openings in wall",
default=False
)
SlotRpt = BoolProperty(
name="Repeat",
description="Repeat slots along wall",
default=False
)
SlotWdg = BoolProperty(
name="Wedged (n/a)",
description="Bevel edges of slots",
default=False
)
SlotX = FloatProperty(
name="Indent",
description="The x position or spacing of slots",
default=0.0, min=-100, max=100.0
)
SlotGap = FloatProperty(
name="Opening",
description="The opening size of slots",
default=0.5, min=0.10, max=100.0
)
SlotV = BoolProperty(
name="Vertical",
description="Vertical slots",
default=True
)
SlotVH = FloatProperty(
name="Height",
description="Height of vertical slot",
default=3.5,
min=0.10, max=100.0
)
SlotVBtm = FloatProperty(
name="Bottom",
description="Z position for slot",
default=5.00,
min=-100.0, max=100.0
)
SlotH = BoolProperty(
name="Horizontal",
description="Horizontal slots",
default=False
)
SlotHW = FloatProperty(
name="Width",
description="Width of horizontal slot",
default=2.5,
min=0.10, max=100.0
)
# this should offset from VBtm... maybe make a % like crenels?
SlotHBtm = FloatProperty(
name="Bottom",
description="Z position for horizontal slot",
default=5.50,
min=-100.0, max=100.0
)
# properties for shelf (extend blocks in area)
ShelfTog = BoolProperty(
name="Shelf",
description="Add blocks in area by depth to make shelf/platform",
default=False
)
ShelfX = FloatProperty(
name="Left",
description="The x position of Shelf",
default=-5.00,
min=-100, max=100.0
)
ShelfZ = FloatProperty(
name="Bottom",
description="The z position of Shelf",
default=10.0,
min=-100, max=100.0
)
ShelfH = FloatProperty(
name="Height",
description="The Height of Shelf area",
default=1.0,
min=0.01, max=100.0
)
ShelfW = FloatProperty(
name="Width",
description="The Width of shelf area",
default=5.0,
min=0.01, max=100.0
)
ShelfD = FloatProperty(
name="Depth",
description="Depth of each block for shelf (from cursor + 1/2 wall depth)",
default=2.0,
min=0.01, max=100.0
)
ShelfBack = BoolProperty(
name="Backside",
description="Shelf on backside of wall",
default=False
)
# properties for steps (extend blocks in area, progressive width)
StepTog = BoolProperty(
name="Steps",
description="Add blocks in area by depth with progressive width to make steps",
default=False
)
StepX = FloatProperty(
name="Left",
description="The x position of steps",
default=-9.00,
min=-100, max=100.0
)
StepZ = FloatProperty(
name="Bottom",
description="The z position of steps",
default=0.0,
min=-100, max=100.0
)
StepH = FloatProperty(
name="Height",
description="The Height of step area",
default=10.0,
min=0.01, max=100.0
)
StepW = FloatProperty(
name="Width",
description="The Width of step area",
default=8.0,
min=0.01, max=100.0
)
StepD = FloatProperty(
name="Depth",
description="Depth of each block for steps (from cursor + 1/2 wall depth)",
default=1.0,
min=0.01, max=100.0
)
StepV = FloatProperty(
name="Riser",
description="Height of each step",
default=0.70,
min=0.01, max=100.0
)
StepT = FloatProperty(
name="Tread",
description="Width of each step",
default=1.0,
min=0.01, max=100.0
)
StepLeft = BoolProperty(
name="Direction",
description="If checked, flip steps direction towards the -X axis",
default=False
)
StepOnly = BoolProperty(
name="Steps Only",
description="Steps only, no supporting blocks",
default=False
)
StepBack = BoolProperty(
name="Backside",
description="Steps on backside | |
JOB_RESTAURANT_DINER: 'CommonGameTag' = 2145
JOB_VENUE: 'CommonGameTag' = 1464
JOB_VET_PATIENT: 'CommonGameTag' = 57442
JOB_WALKBY: 'CommonGameTag' = 1463
LIFESTYLES_DANGEROUS_CAREER: 'CommonGameTag' = 69711
LIFESTYLES_HIGH_ENERGY_CAREER: 'CommonGameTag' = 69683
LIFESTYLES_INDOORSY_CAREER: 'CommonGameTag' = 69733
LIFESTYLES_LOW_ENERGY_CAREER: 'CommonGameTag' = 69684
LIFESTYLES_OUTDOORSY_CAREER: 'CommonGameTag' = 69721
MAILBOX: 'CommonGameTag' = 346
MAIN_PET_SOCIAL: 'CommonGameTag' = 57349
MENTOR_ACTIVITY_TABLE: 'CommonGameTag' = 588
MENTOR_EASEL: 'CommonGameTag' = 365
MENTOR_FITNESS: 'CommonGameTag' = 357
MENTOR_GUITAR: 'CommonGameTag' = 361
MENTOR_MURAL: 'CommonGameTag' = 55398
MENTOR_PIANO: 'CommonGameTag' = 362
MENTOR_REPAIR: 'CommonGameTag' = 765
MENTOR_TREADMILL: 'CommonGameTag' = 355
MENTOR_UPGRADE: 'CommonGameTag' = 766
MENTOR_VIOLIN: 'CommonGameTag' = 363
MENTOR_WOODWORKING_TABLE: 'CommonGameTag' = 764
MENTOR_WORKOUT_MACHINE: 'CommonGameTag' = 356
MICROSCOPE_SLIDE_CRYSTAL: 'CommonGameTag' = 344
MICROSCOPE_SLIDE_FOSSIL: 'CommonGameTag' = 343
MICROSCOPE_SLIDE_PLANT: 'CommonGameTag' = 345
MOOD_ANGRY: 'CommonGameTag' = 317
MOOD_BORED: 'CommonGameTag' = 318
MOOD_CONFIDENT: 'CommonGameTag' = 319
MOOD_CRANKY: 'CommonGameTag' = 320
MOOD_DEPRESSED: 'CommonGameTag' = 321
MOOD_DRUNK: 'CommonGameTag' = 322
MOOD_EMBARRASSED: 'CommonGameTag' = 323
MOOD_ENERGIZED: 'CommonGameTag' = 324
MOOD_FINE: 'CommonGameTag' = 331
MOOD_FLIRTY: 'CommonGameTag' = 325
MOOD_FOCUSED: 'CommonGameTag' = 326
MOOD_HAPPY: 'CommonGameTag' = 328
MOOD_IMAGINATIVE: 'CommonGameTag' = 329
MOOD_OPTIMISM: 'CommonGameTag' = 64
MOOD_PLAYFUL: 'CommonGameTag' = 332
MOOD_SAD: 'CommonGameTag' = 333
MOOD_SLOSHED: 'CommonGameTag' = 334
MOOD_TENSE: 'CommonGameTag' = 327
MOOD_UNCOMFORTABLE: 'CommonGameTag' = 330
NONE_EP03_PLEASE_REUSE_ME: 'CommonGameTag' = 24592
NOSE_COLOR_BLACK: 'CommonGameTag' = 1917
NOSE_COLOR_BLACK_PINK: 'CommonGameTag' = 1922
NOSE_COLOR_BROWN: 'CommonGameTag' = 1918
NOSE_COLOR_BROWN_PINK: 'CommonGameTag' = 1923
NOSE_COLOR_LIVER: 'CommonGameTag' = 1919
NOSE_COLOR_PINK: 'CommonGameTag' = 1920
NOSE_COLOR_TAN: 'CommonGameTag' = 1921
NUDE_PART_ALWAYS: 'CommonGameTag' = 1540
NUDE_PART_MALE_WITH_BREAST: 'CommonGameTag' = 1541
OBJECT_BAR: 'CommonGameTag' = 349
OBJECT_MURAL: 'CommonGameTag' = 55363
OCCULT_ALIEN: 'CommonGameTag' = 12319
OCCULT_HUMAN: 'CommonGameTag' = 1310
OCCULT_MERMAID: 'CommonGameTag' = 2208
OCCULT_VAMPIRE: 'CommonGameTag' = 1677
OCCULT_WITCH: 'CommonGameTag' = 2279
OUTFIT_ART_CRITIC_LEVEL10: 'CommonGameTag' = 55393
OUTFIT_ARTS_CRITIC: 'CommonGameTag' = 55301
OUTFIT_CATEGORY_ATHLETIC: 'CommonGameTag' = 80
OUTFIT_CATEGORY_BATHING: 'CommonGameTag' = 82
OUTFIT_CATEGORY_BATUU = 2470
OUTFIT_CATEGORY_CAREER: 'CommonGameTag' = 263
OUTFIT_CATEGORY_COLD_WEATHER: 'CommonGameTag' = 2054
OUTFIT_CATEGORY_EVERYDAY: 'CommonGameTag' = 77
OUTFIT_CATEGORY_FORMAL: 'CommonGameTag' = 78
OUTFIT_CATEGORY_HOT_WEATHER: 'CommonGameTag' = 2053
OUTFIT_CATEGORY_PARTY: 'CommonGameTag' = 83
OUTFIT_CATEGORY_RETAIL_UNIFORMS: 'CommonGameTag' = 1371
OUTFIT_CATEGORY_SITUATION: 'CommonGameTag' = 335
OUTFIT_CATEGORY_SLEEP: 'CommonGameTag' = 81
OUTFIT_CATEGORY_SWIMWEAR: 'CommonGameTag' = 1229
OUTFIT_CATEGORY_UNUSED: 'CommonGameTag' = 79
OUTFIT_CATEGORY_WITCH: 'CommonGameTag' = 8210
OUTFIT_FOOD_CRITIC: 'CommonGameTag' = 55300
OUTFIT_FOOD_CRITIC_LEVEL10: 'CommonGameTag' = 55394
PATTERN_ANIMAL: 'CommonGameTag' = 590
PATTERN_BICOLOR: 'CommonGameTag' = 1905
PATTERN_BRINDLE: 'CommonGameTag' = 1902
PATTERN_CALICO: 'CommonGameTag' = 1912
PATTERN_HARLEQUIN: 'CommonGameTag' = 1909
PATTERN_MERLE: 'CommonGameTag' = 1907
PATTERN_SABLE: 'CommonGameTag' = 1910
PATTERN_SADDLE: 'CommonGameTag' = 1903
PATTERN_SPECKLED: 'CommonGameTag' = 1913
PATTERN_SPOTTED: 'CommonGameTag' = 1900
PATTERN_STRIPED: 'CommonGameTag' = 1901
PATTERN_SWIRLED: 'CommonGameTag' = 1904
PATTERN_TABBY: 'CommonGameTag' = 1899
PATTERN_TRICOLOR: 'CommonGameTag' = 1906
PATTERN_TUXEDO: 'CommonGameTag' = 1908
PERSONA_BOHO: 'CommonGameTag' = 130
PERSONA_FASHIONISTA: 'CommonGameTag' = 129
PERSONA_MOM: 'CommonGameTag' = 148
PERSONA_ROCKER: 'CommonGameTag' = 128
PORTAL_DISALLOWANCE_MASCOT: 'CommonGameTag' = 69745
PORTAL_DISALLOWANCE_UNGREETED: 'CommonGameTag' = 668
POSTURE_LIFESTYLES_RELAXED_SIT: 'CommonGameTag' = 69695
RECIPE_CANDLE_MAKING_STATION_CANDLE: 'CommonGameTag' = 67604
RECIPE_CATEGORY_CAKE_PIE: 'CommonGameTag' = 1536
RECIPE_CATEGORY_CHOCOLATE: 'CommonGameTag' = 1537
RECIPE_CATEGORY_COLD: 'CommonGameTag' = 1533
RECIPE_CATEGORY_DRINKS: 'CommonGameTag' = 1518
RECIPE_CATEGORY_FIZZY: 'CommonGameTag' = 1531
RECIPE_CATEGORY_FRUIT: 'CommonGameTag' = 1532
RECIPE_CATEGORY_GRAINS: 'CommonGameTag' = 1515
RECIPE_CATEGORY_HOT: 'CommonGameTag' = 1534
RECIPE_CATEGORY_MEAT: 'CommonGameTag' = 1513
RECIPE_CATEGORY_MISC: 'CommonGameTag' = 1517
RECIPE_CATEGORY_NECTAR: 'CommonGameTag' = 1535
RECIPE_CATEGORY_SEAFOOD: 'CommonGameTag' = 1519
RECIPE_CATEGORY_SWEETS: 'CommonGameTag' = 1516
RECIPE_CATEGORY_VEGETARIAN: 'CommonGameTag' = 1514
RECIPE_CATEGORY_WATER: 'CommonGameTag' = 1522
RECIPE_CAULDRON_POTION: 'CommonGameTag' = 49154
RECIPE_CHEFS_CHOICE_CHILD_FRIENDLY: 'CommonGameTag' = 1521
RECIPE_CHILD_RESTRICTED: 'CommonGameTag' = 1523
RECIPE_COURSE_APPETIZER: 'CommonGameTag' = 1507
RECIPE_COURSE_DESSERT: 'CommonGameTag' = 1509
RECIPE_COURSE_DRINK: 'CommonGameTag' = 1524
RECIPE_COURSE_MAIN: 'CommonGameTag' = 1508
RECIPE_FLOWER_ARRANGEMENT: 'CommonGameTag' = 59472
RECIPE_MEAL_BREAKFAST: 'CommonGameTag' = 1510
RECIPE_MEAL_DINNER: 'CommonGameTag' = 1512
RECIPE_MEAL_LUNCH: 'CommonGameTag' = 1511
RECIPE_PLOPSY_BROWSER: 'CommonGameTag' = 83985
RECIPE_TYPE_DRINK: 'CommonGameTag' = 1506
RECIPE_TYPE_DRINK_PRANK: 'CommonGameTag' = 2423
RECIPE_TYPE_FOOD: 'CommonGameTag' = 1505
RECIPE_TYPE_PET_DRINK: 'CommonGameTag' = 57425
RECIPE_TYPE_PET_FOOD: 'CommonGameTag' = 57424
REGION_ACTIVE_CAREER: 'CommonGameTag' = 12437
REGION_CAMPING: 'CommonGameTag' = 1245
REGION_JUNGLE: 'CommonGameTag' = 45059
REGION_RESIDENTIAL: 'CommonGameTag' = 1244
REGION_RETAIL: 'CommonGameTag' = 12374
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_1: 'CommonGameTag' = 138
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_2: 'CommonGameTag' = 139
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_3: 'CommonGameTag' = 142
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_4: 'CommonGameTag' = 143
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_5: 'CommonGameTag' = 144
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_6: 'CommonGameTag' = 147
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_7: 'CommonGameTag' = 281
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_8: 'CommonGameTag' = 284
RESERVED_TEMP_BETA_FIX_DO_NOT_USE_9: 'CommonGameTag' = 290
REWARD_CAS_PART: 'CommonGameTag' = 767
ROLE_BAKE_ONE_CAKE: 'CommonGameTag' = 2277
ROLE_BARTENDER: 'CommonGameTag' = 277
ROLE_BUSINESS_CUSTOMER: 'CommonGameTag' = 1924
ROLE_CAREER: 'CommonGameTag' = 467
ROLE_CATERER: 'CommonGameTag' = 278
ROLE_COLLEGE_ORGANIZATION_EVENT: 'CommonGameTag' = 65583
ROLE_COWORKER: 'CommonGameTag' = 12292
ROLE_CUSTOMER: 'CommonGameTag' = 2142
ROLE_DATE: 'CommonGameTag' = 1439
ROLE_DETECTIVE: 'CommonGameTag' = 12294
ROLE_DOCTOR: 'CommonGameTag' = 12295
ROLE_ENTERTAINER: 'CommonGameTag' = 650
ROLE_FESTIVAL_ARTS_CRAFTS: 'CommonGameTag' = 55317
ROLE_FESTIVAL_BLOSSOM: 'CommonGameTag' = 55312
ROLE_FESTIVAL_FLEA_MARKET: 'CommonGameTag' = 55318
ROLE_FESTIVAL_FOOD: 'CommonGameTag' = 55315
ROLE_FESTIVAL_LAMP: 'CommonGameTag' = 55313
ROLE_FESTIVAL_LOGIC: 'CommonGameTag' = 55314
ROLE_FESTIVAL_MUSIC: 'CommonGameTag' = 55316
ROLE_FORTUNE_TELLER: 'CommonGameTag' = 8199
ROLE_GUEST: 'CommonGameTag' = 266
ROLE_HOST: 'CommonGameTag' = 267
ROLE_HOST_AT_STATION: 'CommonGameTag' = 26635
ROLE_LEAVE: 'CommonGameTag' = 418
ROLE_MAID: 'CommonGameTag' = 279
ROLE_RESTAURANT_DINER: 'CommonGameTag' = 2147
ROLE_RESTAURANT_EAT: 'CommonGameTag' = 2148
ROLE_RESTAURANT_POST_PLACE_ORDER: 'CommonGameTag' = 2149
ROLE_RESTAURANT_STAFF: 'CommonGameTag' = 26633
ROLE_ROOMMATE_NPC: 'CommonGameTag' = 65541
ROLE_SCIENTIST: 'CommonGameTag' = 12293
ROLE_SERVICE: 'CommonGameTag' = 416
ROLE_SPA_STAFF_BORED: 'CommonGameTag' = 18441
ROLE_STATE_EP01_PATIENT_TREATED: 'CommonGameTag' = 12434
ROLE_VET_PATIENT: 'CommonGameTag' = 57400
ROLE_VIP_ROPE_ALLOWED: 'CommonGameTag' = 2143
ROLE_YOGA_CLASS_POST_CLASS: 'CommonGameTag' = 18435
ROLE_YOGA_PRE_CLASS: 'CommonGameTag' = 18463
ROYALTY_APPS: 'CommonGameTag' = 908
ROYALTY_BOOKS: 'CommonGameTag' = 909
ROYALTY_GAMES: 'CommonGameTag' = 910
ROYALTY_LYRICS: 'CommonGameTag' = 1629
ROYALTY_PAINTINGS: 'CommonGameTag' = 911
ROYALTY_SONGS: 'CommonGameTag' = 912
SHOES_BOOTIES: 'CommonGameTag' = 383
SHOES_BOOTS: 'CommonGameTag' = 384
SHOES_FLATS: 'CommonGameTag' = 385
SHOES_HEELS: 'CommonGameTag' = 386
SHOES_LACE_UP_ADULT: 'CommonGameTag' = 387
SHOES_LACE_UP_CHILDREN: 'CommonGameTag' = 388
SHOES_LOAFERS: 'CommonGameTag' = 389
SHOES_SANDALS: 'CommonGameTag' = 390
SHOES_SLIPPERS: 'CommonGameTag' = 391
SHOES_SNEAKERS: 'CommonGameTag' = 392
SHOES_WEDGES: 'CommonGameTag' = 393
SICKNESS_CHECK_UP: 'CommonGameTag' = 57407
SICKNESS_CURED_BY_EXAM_TABLE: 'CommonGameTag' = 57451
SICKNESS_CURED_BY_SURGERY_STATION: 'CommonGameTag' = 57452
SICKNESS_ILLNESS: 'CommonGameTag' = 57408
SICKNESS_PET_EXAM: 'CommonGameTag' = 57403
SITUATION_ACTIVE_CAREER: 'CommonGameTag' = 12358
SITUATION_ACTIVE_CAREER_SCIENTIST: 'CommonGameTag' = 12427
SITUATION_ACTOR_CAREER_COMMERCIAL: 'CommonGameTag' = 61553
SITUATION_ACTOR_CAREER_MOVIE: 'CommonGameTag' = 61556
SITUATION_ACTOR_CAREER_PREP_TASK_ACTING: 'CommonGameTag' = 61615
SITUATION_ACTOR_CAREER_PREP_TASK_CHARISMA: 'CommonGameTag' = 61458
SITUATION_ACTOR_CAREER_PREP_TASK_CO_STAR_REL: 'CommonGameTag' = 61454
SITUATION_ACTOR_CAREER_PREP_TASK_COMEDY: 'CommonGameTag' = 61456
SITUATION_ACTOR_CAREER_PREP_TASK_DIRECTOR_REL: 'CommonGameTag' = 61455
SITUATION_ACTOR_CAREER_PREP_TASK_FITNESS: 'CommonGameTag' = 61459
SITUATION_ACTOR_CAREER_PREP_TASK_GUITAR: 'CommonGameTag' = 61460
SITUATION_ACTOR_CAREER_PREP_TASK_HANDINESS: 'CommonGameTag' = 61457
SITUATION_ACTOR_CAREER_PREP_TASK_PRACTICE_ACTION: 'CommonGameTag' = 61619
SITUATION_ACTOR_CAREER_PREP_TASK_PRACTICE_DRAMATIC: 'CommonGameTag' = 61620
SITUATION_ACTOR_CAREER_PREP_TASK_PRACTICE_ROMANTIC: 'CommonGameTag' = 61621
SITUATION_ACTOR_CAREER_PREP_TASK_RESEARCH_FLIRTY: 'CommonGameTag' = 61616
SITUATION_ACTOR_CAREER_PREP_TASK_RESEARCH_FUNNY: 'CommonGameTag' = 61617
SITUATION_ACTOR_CAREER_PREP_TASK_RESEARCH_MEAN: 'CommonGameTag' = 61618
SITUATION_ACTOR_CAREER_TV_HIGH: 'CommonGameTag' = 61555
SITUATION_ACTOR_CAREER_TV_LOW: 'CommonGameTag' = 61554
SITUATION_APARTMENT_NEIGHBOR_ANSWER_DOOR_COMPLAINT: 'CommonGameTag' = 55304
SITUATION_APARTMENT_NEIGHBOR_LOUD_NOISES: 'CommonGameTag' = 55303
SITUATION_BASKET_BALLER_A: 'CommonGameTag' = 55381
SITUATION_BASKET_BALLER_B: 'CommonGameTag' = 55382
SITUATION_BATUU_ARREST: 'CommonGameTag' = 51231
SITUATION_BATUU_FR13_MISSION: 'CommonGameTag' = 51278
SITUATION_BATUU_FS2_MISSION: 'CommonGameTag' = 51263
SITUATION_BATUU_FS3_MISSION: 'CommonGameTag' = 51264
SITUATION_BATUU_FS4_CRIMINAL: 'CommonGameTag' = 51255
SITUATION_BATUU_FS6_MISSION: 'CommonGameTag' = 51262
SITUATION_BATUU_FS7_MISSION: 'CommonGameTag' = 51261
SITUATION_BATUU_INSPECTION: 'CommonGameTag' = 51232
SITUATION_BATUU_MISSION_LIGHTSABER: 'CommonGameTag' = 51241
SITUATION_BATUU_OGAS_CELEBRATION_BLACKLISTED: 'CommonGameTag' = 51243
SITUATION_BATUU_RS2_MISSION: 'CommonGameTag' = 51272
SITUATION_BATUU_RS4_MISSION: 'CommonGameTag' = 51269
SITUATION_BATUU_RS6_MISSION: 'CommonGameTag' = 51273
SITUATION_BATUU_RS7_MISSION: 'CommonGameTag' = 51274
SITUATION_BATUU_SABACC_OPPONENT_1: 'CommonGameTag' = 51258
SITUATION_BATUU_SABACC_OPPONENT_2: 'CommonGameTag' = 51259
SITUATION_BATUU_SABACC_OPPONENT_3: 'CommonGameTag' = 51260
SITUATION_BATUU_SR4_MISSION: 'CommonGameTag' = 51275
SITUATION_BATUU_SR9_MISSION: 'CommonGameTag' = 51265
SITUATION_BATUU_SS8_MISSION: 'CommonGameTag' = 51276
SITUATION_BATUU_SS9_MISSION: 'CommonGameTag' = 51277
SITUATION_BEAR: 'CommonGameTag' = 10247
SITUATION_BONFIRE: 'CommonGameTag' = 24586
SITUATION_BOWLING_GROUP: 'CommonGameTag' = 38919
SITUATION_BOWLING_GROUP_2: 'CommonGameTag' = 38920
SITUATION_BOWLING_GROUP_3: 'CommonGameTag' = 38921
SITUATION_BOWLING_GROUP_4: 'CommonGameTag' = 38922
SITUATION_BUSKER: 'CommonGameTag' = 55308
SITUATION_BUTLER: 'CommonGameTag' = 36867
SITUATION_CELEBRITY_FAN: 'CommonGameTag' = 61476
SITUATION_CITY_INVITES: 'CommonGameTag' = 55380
SITUATION_CITY_REPAIR: 'CommonGameTag' = 55355
SITUATION_CLOWN: 'CommonGameTag' = 955
SITUATION_COMPLAINT_NOISE: 'CommonGameTag' = 55425
SITUATION_COOKING_INTERACTIONS: 'CommonGameTag' = 1017
SITUATION_CRIMINAL: 'CommonGameTag' = 956
SITUATION_DANCE_TOGETHER: 'CommonGameTag' = 24606
SITUATION_DJ_PERFORMANCE: 'CommonGameTag' = 24582
SITUATION_EVENT_NPC: 'CommonGameTag' = 1501
SITUATION_FESTIVAL: 'CommonGameTag' = 55401
SITUATION_FESTIVAL_BLOSSOM_ROMANTIC_COUPLE: 'CommonGameTag' = 55390
SITUATION_FESTIVAL_LOGIC_ROCKET_SHIP_WOOHOOERS: 'CommonGameTag' = 55389
SITUATION_FIREFIGHTER: 'CommonGameTag' = 2377
SITUATION_FLOWER_BUNNY: 'CommonGameTag' = 59476
SITUATION_FOREST_GHOST: 'CommonGameTag' = 10259
SITUATION_FOREST_RANGER: 'CommonGameTag' = 10264
SITUATION_GP07_WALKBY_CONSPIRACIST_01: 'CommonGameTag' = 47158
SITUATION_GP07_WALKBY_CONSPIRACIST_02: 'CommonGameTag' = 47159
SITUATION_GP07_WALKBY_CONSPIRACIST_03: 'CommonGameTag' = 47160
SITUATION_GP07_WALKBY_FBI_01: 'CommonGameTag' = 47161
SITUATION_GP07_WALKBY_FBI_02: 'CommonGameTag' = 47162
SITUATION_GP07_WALKBY_FBI_03: 'CommonGameTag' = 47163
SITUATION_GP07_WALKBY_MILITARY_01: 'CommonGameTag' = 47150
SITUATION_GP07_WALKBY_MILITARY_02: 'CommonGameTag' = 47151
SITUATION_GP07_WALKBY_MILITARY_03: 'CommonGameTag' = 47152
SITUATION_GP07_WALKBY_MILITARY_04: 'CommonGameTag' = 47153
SITUATION_GP07_WALKBY_SCIENTIST_01: 'CommonGameTag' = 47154
SITUATION_GP07_WALKBY_SCIENTIST_02: 'CommonGameTag' = 47155
SITUATION_GP07_WALKBY_SCIENTIST_03: 'CommonGameTag' = 47156
SITUATION_GP07_WALKBY_SCIENTIST_04: 'CommonGameTag' = 47157
SITUATION_GARDENER: 'CommonGameTag' = 2152
SITUATION_GNOME_BERSERK: 'CommonGameTag' = 59455
SITUATION_GNOME_NORMAL: 'CommonGameTag' = 59454
SITUATION_GRILL_GROUP: 'CommonGameTag' = 1461
SITUATION_HIKING_TRAIL: 'CommonGameTag' = 69746
SITUATION_HIRED_NANNY: 'CommonGameTag' = 1550
SITUATION_HOLIDAY: 'CommonGameTag' = 59460
SITUATION_HOME_CHEF: 'CommonGameTag' = 26642
SITUATION_HOT_DOG: 'CommonGameTag' = 958
SITUATION_INTRIGUED_NOISE: 'CommonGameTag' = 55426
SITUATION_INTRIGUED_SMELL: 'CommonGameTag' = 55427
SITUATION_ISLAND_SPIRITS: 'CommonGameTag' = 63496
SITUATION_LIVES_ON_STREET_A: 'CommonGameTag' = 55435
SITUATION_LIVES_ON_STREET_B: 'CommonGameTag' = 55436
SITUATION_LIVES_ON_STREET_C: 'CommonGameTag' = 55437
SITUATION_LIVES_ON_STREET_D: 'CommonGameTag' = 55438
SITUATION_MAID: 'CommonGameTag' = 957
SITUATION_MAILMAN: 'CommonGameTag' = 1343
SITUATION_MARKET_STALL_VENDOR: 'CommonGameTag' = 1949
SITUATION_MASTER_FISHERMAN: 'CommonGameTag' = 889
SITUATION_MASTER_GARDENER: 'CommonGameTag' = 890
SITUATION_MURAL_PAINTER: 'CommonGameTag' = 55383
SITUATION_NIGHT_TIME_VISIT: 'CommonGameTag' = 1679
SITUATION_PET_OBSTACLE_COURSE: 'CommonGameTag' = 57427
SITUATION_PICNIC_TABLE: 'CommonGameTag' = 1460
SITUATION_PIZZA: | |
3807 9.07011961409955084755857370765622E-2293 4.53505980704977542377928685382811E-2293
3808 2.267529903524887711889643426914055E-2293 1.133764951762443855944821713457028E-2293
3809 5.66882475881221927972410856728514E-2294 2.83441237940610963986205428364257E-2294
3810 1.417206189703054819931027141821285E-2294 7.086030948515274099655135709106425E-2295
3811 3.543015474257637049827567854553213E-2295 1.771507737128818524913783927276606E-2295
3812 8.85753868564409262456891963638303E-2296 4.428769342822046312284459818191515E-2296
3813 2.214384671411023156142229909095758E-2296 1.107192335705511578071114954547879E-2296
3814 5.535961678527557890355574772739395E-2297 2.767980839263778945177787386369698E-2297
3815 1.383990419631889472588893693184849E-2297 6.919952098159447362944468465924245E-2298
3816 3.459976049079723681472234232962123E-2298 1.729988024539861840736117116481061E-2298
3817 8.649940122699309203680585582405305E-2299 4.324970061349654601840292791202653E-2299
3818 2.162485030674827300920146395601327E-2299 1.081242515337413650460073197800663E-2299
3819 5.406212576687068252300365989003315E-2300 2.703106288343534126150182994501658E-2300
3820 1.351553144171767063075091497250829E-2300 6.757765720858835315375457486254145E-2301
3821 3.378882860429417657687728743127073E-2301 1.689441430214708828843864371563536E-2301
3822 8.44720715107354414421932185781768E-2302 4.22360357553677207210966092890884E-2302
3823 2.11180178776838603605483046445442E-2302 1.05590089388419301802741523222721E-2302
3824 5.27950446942096509013707616113605E-2303 2.639752234710482545068538080568025E-2303
3825 1.319876117355241272534269040284013E-2303 6.599380586776206362671345201420063E-2304
3826 3.299690293388103181335672600710032E-2304 1.649845146694051590667836300355016E-2304
3827 8.24922573347025795333918150177508E-2305 4.12461286673512897666959075088754E-2305
3828 2.06230643336756448833479537544377E-2305 1.031153216683782244167397687721885E-2305
3829 5.155766083418911220836988438609425E-2306 2.577883041709455610418494219304713E-2306
3830 1.288941520854727805209247109652357E-2306 6.444707604273639026046235548261783E-2307
3831 3.222353802136819513023117774130892E-2307 1.611176901068409756511558887065446E-2307
3832 8.05588450534204878255779443532723E-2308 4.027942252671024391278897217663615E-2308
3833 2.013971126335512195639448608831808E-2308 1.006985563167756097819724304415904E-2308
3834 5.03492781583878048909862152207952E-2309 2.51746390791939024454931076103976E-2309
3835 1.25873195395969512227465538051988E-2309 6.2936597697984756113732769025994E-2310
3836 3.1468298848992378056866384512997E-2310 1.57341494244961890284331922564985E-2310
3837 7.86707471224809451421659612824925E-2311 3.933537356124047257108298064124625E-2311
3838 1.966768678062023628554149032062313E-2311 9.833843390310118142770745160311563E-2312
3839 4.916921695155059071385372580155782E-2312 2.458460847577529535692686290077891E-2312
3840 1.229230423788764767846343145038946E-2312 6.146152118943823839231715725194728E-2313
3841 3.073076059471911919615857862597364E-2313 1.536538029735955959807928931298682E-2313
3842 7.68269014867977979903964465649341E-2314 3.841345074339889899519822328246705E-2314
3843 1.920672537169944949759911164123353E-2314 9.603362685849724748799555820616763E-2315
3844 4.801681342924862374399777910308382E-2315 2.400840671462431187199888955154191E-2315
3845 1.200420335731215593599944477577096E-2315 6.002101678656077967999722387885478E-2316
3846 3.001050839328038983999861193942739E-2316 1.500525419664019491999930596971370E-2316
3847 7.50262709832009745999965298485685E-2317 3.751313549160048729999826492428425E-2317
3848 1.875656774580024364999913246214213E-2317 9.378283872900121824999566231071063E-2318
3849 4.689141936450060912499783115535532E-2318 2.344570968225030456249891557767766E-2318
3850 1.172285484112515228124945778883883E-2318 5.861427420562576140624728894419415E-2319
3851 2.930713710281288070312364447209708E-2319 1.465356855140644035156182223604854E-2319
3852 7.32678427570322017578091111802427E-2320 3.663392137851610087890455559012135E-2320
3853 1.831696068925805043945227779506068E-2320 9.158480344629025219726138897530338E-2321
3854 4.579240172314512609863069448765169E-2321 2.289620086157256304931534724382585E-2321
3855 1.144810043078628152465767362191293E-2321 5.724050215393140762328836810956463E-2322
3856 2.862025107696570381164418405478232E-2322 1.431012553848285190582209202739116E-2322
3857 7.15506276924142595291104601369558E-2323 3.57753138462071297645552300684779E-2323
3858 1.788765692310356488227761503423895E-2323 8.943828461551782441138807517119475E-2324
3859 4.471914230775891220569403758559738E-2324 2.235957115387945610284701879279869E-2324
3860 1.117978557693972805142350939639935E-2324 5.589892788469864025711754698199673E-2325
3861 2.794946394234932012855877349099837E-2325 1.397473197117466006427938674549918E-2325
3862 6.98736598558733003213969337274959E-2326 3.493682992793665016069846686374795E-2326
3863 1.746841496396832508034923343187398E-2326 8.734207481984162540174616715936988E-2327
3864 4.367103740992081270087308357968494E-2327 2.183551870496040635043654178984247E-2327
3865 1.091775935248020317521827089492124E-2327 5.458879676240101587609135447460618E-2328
3866 2.729439838120050793804567723730309E-2328 1.364719919060025396902283861865155E-2328
3867 6.823599595300126984511419309325775E-2329 3.411799797650063492255709654662888E-2329
3868 1.705899898825031746127854827331444E-2329 8.52949949412515873063927413665722E-2330
3869 4.26474974706257936531963706832861E-2330 2.132374873531289682659818534164305E-2330
3870 1.066187436765644841329909267082153E-2330 5.330937183828224206649546335410763E-2331
3871 2.665468591914112103324773167705382E-2331 1.332734295957056051662386583852691E-2331
3872 6.663671479785280258311932919263455E-2332 3.331835739892640129155966459631728E-2332
3873 1.665917869946320064577983229815864E-2332 8.32958934973160032288991614907932E-2333
3874 4.16479467486580016144495807453966E-2333 2.08239733743290008072247903726983E-2333
3875 1.041198668716450040361239518634915E-2333 5.205993343582250201806197593174575E-2334
3876 2.602996671791125100903098796587288E-2334 1.301498335895562550451549398293644E-2334
3877 6.50749167947781275225774699146822E-2335 3.25374583973890637612887349573411E-2335
3878 1.626872919869453188064436747867055E-2335 8.134364599347265940322183739335275E-2336
3879 4.067182299673632970161091869667638E-2336 2.033591149836816485080545934833819E-2336
3880 1.016795574918408242540272967416910E-2336 5.083977874592041212701364837084548E-2337
3881 2.541988937296020606350682418542274E-2337 1.270994468648010303175341209271137E-2337
3882 6.354972343240051515876706046355685E-2338 3.177486171620025757938353023177843E-2338
3883 1.588743085810012878969176511588922E-2338 7.943715429050064394845882557944608E-2339
3884 3.971857714525032197422941278972304E-2339 1.985928857262516098711470639486152E-2339
3885 9.92964428631258049355735319743076E-2340 4.96482214315629024677867659871538E-2340
3886 2.48241107157814512338933829935769E-2340 1.241205535789072561694669149678845E-2340
3887 6.206027678945362808473345748394225E-2341 3.103013839472681404236672874197113E-2341
3888 1.551506919736340702118336437098557E-2341 7.757534598681703510591682185492783E-2342
3889 3.878767299340851755295841092746392E-2342 1.939383649670425877647920546373196E-2342
3890 9.69691824835212938823960273186598E-2343 4.84845912417606469411980136593299E-2343
3891 2.424229562088032347059900682966495E-2343 1.212114781044016173529950341483248E-2343
3892 6.06057390522008086764975170741624E-2344 3.03028695261004043382487585370812E-2344
3893 1.51514347630502021691243792685406E-2344 7.5757173815251010845621896342703E-2345
3894 3.78785869076255054228109481713515E-2345 1.893929345381275271140547408567575E-2345
3895 9.469646726906376355702737042837875E-2346 4.734823363453188177851368521418938E-2346
3896 2.367411681726594088925684260709469E-2346 1.183705840863297044462842130354735E-2346
3897 5.918529204316485222314210651773675E-2347 2.959264602158242611157105325886838E-2347
3898 1.479632301079121305578552662943419E-2347 7.398161505395606527892763314717095E-2348
3899 3.699080752697803263946381657358548E-2348 1.849540376348901631973190828679274E-2348
3900 9.24770188174450815986595414339637E-2349 4.623850940872254079932977071698185E-2349
3901 2.311925470436127039966488535849093E-2349 1.155962735218063519983244267924546E-2349
3902 5.77981367609031759991622133962273E-2350 2.889906838045158799958110669811365E-2350
3903 1.444953419022579399979055334905683E-2350 7.224767095112896999895276674528413E-2351
3904 3.612383547556448499947638337264207E-2351 1.806191773778224249973819168632103E-2351
3905 9.030958868891121249869095843160515E-2352 4.515479434445560624934547921580258E-2352
3906 2.257739717222780312467273960790129E-2352 1.128869858611390156233636980395065E-2352
3907 5.644349293056950781168184901975325E-2353 2.822174646528475390584092450987663E-2353
3908 1.411087323264237695292046225493832E-2353 7.055436616321188476460231127469158E-2354
3909 3.527718308160594238230115563734579E-2354 1.763859154080297119115057781867290E-2354
3910 8.81929577040148559557528890933645E-2355 4.409647885200742797787644454668225E-2355
3911 2.204823942600371398893822227334113E-2355 1.102411971300185699446911113667056E-2355
3912 5.51205985650092849723455556833528E-2356 2.75602992825046424861727778416764E-2356
3913 1.37801496412523212430863889208382E-2356 6.8900748206261606215431944604191E-2357
3914 3.44503741031308031077159723020955E-2357 1.722518705156540155385798615104775E-2357
3915 8.612593525782700776928993075523875E-2358 4.306296762891350388464496537761938E-2358
3916 2.153148381445675194232248268880969E-2358 1.076574190722837597116124134440485E-2358
3917 5.382870953614187985580620672202425E-2359 2.691435476807093992790310336101213E-2359
3918 1.345717738403546996395155168050607E-2359 6.728588692017734981975775840253033E-2360
3919 3.364294346008867490987887920126517E-2360 1.682147173004433745493943960063258E-2360
3920 8.41073586502216872746971980031629E-2361 4.205367932511084363734859900158145E-2361
3921 2.102683966255542181867429950079073E-2361 1.051341983127771090933714975039536E-2361
3922 5.25670991563885545466857487519768E-2362 2.62835495781942772733428743759884E-2362
3923 1.31417747890971386366714371879942E-2362 6.5708873945485693183357185939971E-2363
3924 3.28544369727428465916785929699855E-2363 1.642721848637142329583929648499275E-2363
3925 8.213609243185711647919648242496375E-2364 4.106804621592855823959824121248188E-2364
3926 2.053402310796427911979912060624094E-2364 1.026701155398213955989956030312047E-2364
3927 5.133505776991069779949780151560235E-2365 2.566752888495534889974890075780118E-2365
3928 1.283376444247767444987445037890059E-2365 6.416882221238837224937225189450295E-2366
3929 3.208441110619418612468612594725148E-2366 1.604220555309709306234306297362574E-2366
3930 8.02110277654854653117153148681287E-2367 4.010551388274273265585765743406435E-2367
3931 2.005275694137136632792882871703218E-2367 1.002637847068568316396441435851609E-2367
3932 5.013189235342841581982207179258045E-2368 2.506594617671420790991103589629023E-2368
3933 1.253297308835710395495551794814512E-2368 6.266486544178551977477758974072558E-2369
3934 3.133243272089275988738879487036279E-2369 1.566621636044637994369439743518140E-2369
3935 7.83310818022318997184719871759070E-2370 3.91655409011159498592359935879535E-2370
3936 1.958277045055797492961799679397675E-2370 9.791385225278987464808998396988375E-2371
3937 4.895692612639493732404499198494188E-2371 2.447846306319746866202249599247094E-2371
3938 1.223923153159873433101124799623547E-2371 6.119615765799367165505623998117735E-2372
3939 3.059807882899683582752811999058868E-2372 1.529903941449841791376405999529434E-2372
3940 7.64951970724920895688202999764717E-2373 3.824759853624604478441014998823585E-2373
3941 1.912379926812302239220507499411793E-2373 9.561899634061511196102537497058963E-2374
3942 4.780949817030755598051268748529482E-2374 2.390474908515377799025634374264741E-2374
3943 1.195237454257688899512817187132371E-2374 5.976187271288444497564085935661853E-2375
3944 2.988093635644222248782042967830927E-2375 1.494046817822111124391021483915463E-2375
3945 7.470234089110555621955107419577315E-2376 3.735117044555277810977553709788658E-2376
3946 1.867558522277638905488776854894329E-2376 9.337792611388194527443884274471645E-2377
3947 4.668896305694097263721942137235823E-2377 2.334448152847048631860971068617911E-2377
3948 1.167224076423524315930485534308956E-2377 5.836120382117621579652427671544778E-2378
3949 2.918060191058810789826213835772389E-2378 1.459030095529405394913106917886195E-2378
3950 7.295150477647026974565534589430975E-2379 3.647575238823513487282767294715488E-2379
3951 1.823787619411756743641383647357744E-2379 9.11893809705878371820691823678872E-2380
3952 4.55946904852939185910345911839436E-2380 2.27973452426469592955172955919718E-2380
3953 1.13986726213234796477586477959859E-2380 5.69933631066173982387932389799295E-2381
3954 2.849668155330869911939661948996475E-2381 1.424834077665434955969830974498238E-2381
3955 7.12417038832717477984915487249119E-2382 3.562085194163587389924577436245595E-2382
3956 1.781042597081793694962288718122798E-2382 8.905212985408968474811443590613988E-2383
3957 4.452606492704484237405721795306994E-2383 2.226303246352242118702860897653497E-2383
3958 1.113151623176121059351430448826749E-2383 5.565758115880605296757152244133743E-2384
3959 2.782879057940302648378576122066872E-2384 1.391439528970151324189288061033436E-2384
3960 6.95719764485075662094644030516718E-2385 3.47859882242537831047322015258359E-2385
3961 1.739299411212689155236610076291795E-2385 8.696497056063445776183050381458975E-2386
3962 4.348248528031722888091525190729488E-2386 2.174124264015861444045762595364744E-2386
3963 1.087062132007930722022881297682372E-2386 5.43531066003965361011440648841186E-2387
3964 2.71765533001982680505720324420593E-2387 1.358827665009913402528601622102965E-2387
3965 6.794138325049567012643008110514825E-2388 3.397069162524783506321504055257413E-2388
3966 1.698534581262391753160752027628707E-2388 8.492672906311958765803760138143533E-2389
3967 4.246336453155979382901880069071767E-2389 2.123168226577989691450940034535883E-2389
3968 1.061584113288994845725470017267942E-2389 5.307920566444974228627350086339708E-2390
3969 2.653960283222487114313675043169854E-2390 1.326980141611243557156837521584927E-2390
3970 6.634900708056217785784187607924635E-2391 3.317450354028108892892093803962318E-2391
3971 1.658725177014054446446046901981159E-2391 8.293625885070272232230234509905795E-2392
3972 4.146812942535136116115117254952898E-2392 2.073406471267568058057558627476449E-2392
3973 1.036703235633784029028779313738225E-2392 5.183516178168920145143896568691123E-2393
3974 2.591758089084460072571948284345562E-2393 1.295879044542230036285974142172781E-2393
3975 6.479395222711150181429870710863905E-2394 3.239697611355575090714935355431953E-2394
3976 1.619848805677787545357467677715977E-2394 8.099244028388937726787338388579883E-2395
3977 4.049622014194468863393669194289942E-2395 2.024811007097234431696834597144971E-2395
3978 1.012405503548617215848417298572486E-2395 5.062027517743086079242086492862428E-2396
3979 2.531013758871543039621043246431214E-2396 1.265506879435771519810521623215607E-2396
3980 6.327534397178857599052608116078035E-2397 3.163767198589428799526304058039018E-2397
3981 1.581883599294714399763152029019509E-2397 7.909417996473571998815760145097545E-2398
3982 3.954708998236785999407880072548773E-2398 1.977354499118392999703940036274386E-2398
3983 9.88677249559196499851970018137193E-2399 4.943386247795982499259850090685965E-2399
3984 2.471693123897991249629925045342983E-2399 1.235846561948995624814962522671491E-2399
3985 6.179232809744978124074812613357455E-2400 3.089616404872489062037406306678728E-2400
3986 1.544808202436244531018703153339364E-2400 7.72404101218122265509351576669682E-2401
3987 3.86202050609061132754675788334841E-2401 1.931010253045305663773378941674205E-2401
3988 9.655051265226528318866894708371025E-2402 4.827525632613264159433447354185513E-2402
3989 2.413762816306632079716723677092757E-2402 1.206881408153316039858361838546378E-2402
3990 6.03440704076658019929180919273189E-2403 3.017203520383290099645904596365945E-2403
3991 1.508601760191645049822952298182973E-2403 7.543008800958225249114761490914863E-2404
3992 3.771504400479112624557380745457432E-2404 1.885752200239556312278690372728716E-2404
3993 9.42876100119778156139345186364358E-2405 4.71438050059889078069672593182179E-2405
3994 2.357190250299445390348362965910895E-2405 1.178595125149722695174181482955448E-2405
3995 5.89297562574861347587090741477724E-2406 2.94648781287430673793545370738862E-2406
3996 1.47324390643715336896772685369431E-2406 7.36621953218576684483863426847155E-2407
3997 3.683109766092883422419317134235775E-2407 1.841554883046441711209658567117888E-2407
3998 9.20777441523220855604829283558944E-2408 4.60388720761610427802414641779472E-2408
3999 2.30194360380805213901207320889736E-2408 1.15097180190402606950603660444868E-2408
4000 5.7548590095201303475301830222434E-2409 2.8774295047600651737650915111217E-2409
4001 1.43871475238003258688254575556085E-2409 7.19357376190016293441272877780425E-2410
4002 3.596786880950081467206364388902125E-2410 1.798393440475040733603182194451063E-2410
4003 8.991967202375203668015910972255315E-2411 4.495983601187601834007955486127658E-2411
4004 2.247991800593800917003977743063829E-2411 1.123995900296900458501988871531915E-2411
4005 5.619979501484502292509944357659575E-2412 2.809989750742251146254972178829788E-2412
4006 1.404994875371125573127486089414894E-2412 7.02497437685562786563743044707447E-2413
4007 3.512487188427813932818715223537235E-2413 1.756243594213906966409357611768618E-2413
4008 8.78121797106953483204678805884309E-2414 4.390608985534767416023394029421545E-2414
4009 2.195304492767383708011697014710773E-2414 1.097652246383691854005848507355386E-2414
4010 5.48826123191845927002924253677693E-2415 2.744130615959229635014621268388465E-2415
4011 1.372065307979614817507310634194233E-2415 6.860326539898074087536553170971163E-2416
4012 3.430163269949037043768276585485582E-2416 1.715081634974518521884138292742791E-2416
4013 8.575408174872592609420691463713955E-2417 4.287704087436296304710345731856978E-2417
4014 2.143852043718148152355172865928489E-2417 1.071926021859074076177586432964245E-2417
4015 5.359630109295370380887932164821225E-2418 2.679815054647685190443966082410613E-2418
4016 1.339907527323842595221983041205307E-2418 6.699537636619212976109915206026533E-2419
4017 3.349768818309606488054957603013267E-2419 1.674884409154803244027478801506633E-2419
4018 8.374422045774016220137394007533165E-2420 4.187211022887008110068697003766583E-2420
4019 2.093605511443504055034348501883292E-2420 1.046802755721752027517174250941646E-2420
4020 5.23401377860876013758587125470823E-2421 2.617006889304380068792935627354115E-2421
4021 1.308503444652190034396467813677058E-2421 6.542517223260950171982339068385288E-2422
4022 3.271258611630475085991169534192644E-2422 1.635629305815237542995584767096322E-2422
4023 8.17814652907618771497792383548161E-2423 4.089073264538093857488961917740805E-2423
4024 2.044536632269046928744480958870403E-2423 1.022268316134523464372240479435201E-2423
4025 5.111341580672617321861202397176005E-2424 2.555670790336308660930601198588003E-2424
4026 1.277835395168154330465300599294002E-2424 6.389176975840771652326502996470008E-2425
4027 3.194588487920385826163251498235004E-2425 1.597294243960192913081625749117502E-2425
4028 7.98647121980096456540812874558751E-2426 3.993235609900482282704064372793755E-2426
4029 1.996617804950241141352032186396878E-2426 9.983089024751205706760160931984388E-2427
4030 4.991544512375602853380080465992194E-2427 2.495772256187801426690040232996097E-2427
4031 1.247886128093900713345020116498049E-2427 6.239430640469503566725100582490243E-2428
4032 3.119715320234751783362550291245122E-2428 1.559857660117375891681275145622561E-2428
4033 7.799288300586879458406375728112805E-2429 3.899644150293439729203187864056403E-2429
4034 1.949822075146719864601593932028202E-2429 9.749110375733599323007969660141008E-2430
4035 4.874555187866799661503984830070504E-2430 2.437277593933399830751992415035252E-2430
4036 1.218638796966699915375996207517626E-2430 6.09319398483349957687998103758813E-2431
4037 3.046596992416749788439990518794065E-2431 1.523298496208374894219995259397033E-2431
4038 7.616492481041874471099976296985165E-2432 3.808246240520937235549988148492583E-2432
4039 1.904123120260468617774994074246292E-2432 9.520615601302343088874970371231458E-2433
4040 4.760307800651171544437485185615729E-2433 2.380153900325585772218742592807865E-2433
4041 1.190076950162792886109371296403933E-2433 5.950384750813964430546856482019663E-2434
4042 2.975192375406982215273428241009832E-2434 1.487596187703491107636714120504916E-2434
4043 7.43798093851745553818357060252458E-2435 3.71899046925872776909178530126229E-2435
4044 1.859495234629363884545892650631145E-2435 9.297476173146819422729463253155725E-2436
4045 4.648738086573409711364731626577863E-2436 2.324369043286704855682365813288931E-2436
4046 1.162184521643352427841182906644466E-2436 5.810922608216762139205914533222328E-2437
4047 2.905461304108381069602957266611164E-2437 1.452730652054190534801478633305582E-2437
4048 7.26365326027095267400739316652791E-2438 3.631826630135476337003696583263955E-2438
4049 1.815913315067738168501848291631978E-2438 9.079566575338690842509241458159888E-2439
4050 4.539783287669345421254620729079944E-2439 2.269891643834672710627310364539972E-2439
4051 1.134945821917336355313655182269986E-2439 5.67472910958668177656827591134993E-2440
4052 2.837364554793340888284137955674965E-2440 1.418682277396670444142068977837483E-2440
4053 7.093411386983352220710344889187415E-2441 3.546705693491676110355172444593708E-2441
4054 1.773352846745838055177586222296854E-2441 8.86676423372919027588793111148427E-2442
4055 4.433382116864595137943965555742135E-2442 2.216691058432297568971982777871068E-2442
4056 1.108345529216148784485991388935534E-2442 5.54172764608074392242995694467767E-2443
4057 2.770863823040371961214978472338835E-2443 1.385431911520185980607489236169418E-2443
4058 6.92715955760092990303744618084709E-2444 3.463579778800464951518723090423545E-2444
4059 1.731789889400232475759361545211773E-2444 8.658949447001162378796807726058863E-2445
4060 4.329474723500581189398403863029432E-2445 2.164737361750290594699201931514716E-2445
4061 1.082368680875145297349600965757358E-2445 5.41184340437572648674800482878679E-2446
4062 2.705921702187863243374002414393395E-2446 1.352960851093931621687001207196698E-2446
4063 6.76480425546965810843500603598349E-2447 3.382402127734829054217503017991745E-2447
4064 1.691201063867414527108751508995873E-2447 8.456005319337072635543757544979363E-2448
4065 4.228002659668536317771878772489682E-2448 2.114001329834268158885939386244841E-2448
4066 1.057000664917134079442969693122421E-2448 5.285003324585670397214848465612103E-2449
4067 2.642501662292835198607424232806052E-2449 1.321250831146417599303712116403026E-2449
4068 6.60625415573208799651856058201513E-2450 3.303127077866043998259280291007565E-2450
4069 1.651563538933021999129640145503783E-2450 8.257817694665109995648200727518913E-2451
4070 4.128908847332554997824100363759457E-2451 2.064454423666277498912050181879728E-2451
4071 1.032227211833138749456025090939864E-2451 5.16113605916569374728012545469932E-2452
4072 2.58056802958284687364006272734966E-2452 1.29028401479142343682003136367483E-2452
4073 6.45142007395711718410015681837415E-2453 3.225710036978558592050078409187075E-2453
4074 1.612855018489279296025039204593538E-2453 8.064275092446396480125196022967688E-2454
4075 4.032137546223198240062598011483844E-2454 2.016068773111599120031299005741922E-2454
4076 1.008034386555799560015649502870961E-2454 5.040171932778997800078247514354805E-2455
4077 2.520085966389498900039123757177403E-2455 1.260042983194749450019561878588701E-2455
4078 6.300214915973747250097809392943505E-2456 3.150107457986873625048904696471753E-2456
4079 1.575053728993436812524452348235877E-2456 7.875268644967184062622261741179383E-2457
4080 3.937634322483592031311130870589692E-2457 1.968817161241796015655565435294846E-2457
4081 9.84408580620898007827782717647423E-2458 4.922042903104490039138913588237115E-2458
4082 2.461021451552245019569456794118558E-2458 1.230510725776122509784728397059279E-2458
4083 6.152553628880612548923641985296395E-2459 3.076276814440306274461820992648198E-2459
4084 1.538138407220153137230910496324099E-2459 7.690692036100765686154552481620495E-2460
4085 3.845346018050382843077276240810248E-2460 1.922673009025191421538638120405124E-2460
4086 9.61336504512595710769319060202562E-2461 4.80668252256297855384659530101281E-2461
4087 2.403341261281489276923297650506405E-2461 1.201670630640744638461648825253203E-2461
4088 6.008353153203723192308244126266015E-2462 3.004176576601861596154122063133008E-2462
4089 1.502088288300930798077061031566504E-2462 7.51044144150465399038530515783252E-2463
4090 3.75522072075232699519265257891626E-2463 1.87761036037616349759632628945813E-2463
4091 9.38805180188081748798163144729065E-2464 4.694025900940408743990815723645325E-2464
4092 2.347012950470204371995407861822663E-2464 1.173506475235102185997703930911331E-2464
4093 5.867532376175510929988519654556655E-2465 2.933766188087755464994259827278328E-2465
4094 1.466883094043877732497129913639164E-2465 7.33441547021938866248564956819582E-2466
4095 3.66720773510969433124282478409791E-2466 1.833603867554847165621412392048955E-2466
4096 9.168019337774235828107061960244775E-2467 4.584009668887117914053530980122388E-2467
4097 2.292004834443558957026765490061194E-2467 1.146002417221779478513382745030597E-2467
4098 5.730012086108897392566913725152985E-2468 2.865006043054448696283456862576493E-2468
4099 1.432503021527224348141728431288247E-2468 7.162515107636121740708642156441233E-2469
4100 3.581257553818060870354321078220617E-2469 1.790628776909030435177160539110308E-2469
4101 8.95314388454515217588580269555154E-2470 4.47657194227257608794290134777577E-2470
4102 2.238285971136288043971450673887885E-2470 1.119142985568144021985725336943943E-2470
4103 5.595714927840720109928626684719715E-2471 2.797857463920360054964313342359858E-2471
4104 1.398928731960180027482156671179929E-2471 6.994643659800900137410783355899645E-2472
4105 3.497321829900450068705391677949823E-2472 1.748660914950225034352695838974911E-2472
4106 8.743304574751125171763479194874555E-2473 4.371652287375562585881739597437278E-2473
4107 2.185826143687781292940869798718639E-2473 1.092913071843890646470434899359320E-2473
4108 5.46456535921945323235217449679660E-2474 2.73228267960972661617608724839830E-2474
4109 1.36614133980486330808804362419915E-2474 6.83070669902431654044021812099575E-2475
4110 3.415353349512158270220109060497875E-2475 1.707676674756079135110054530248938E-2475
4111 8.53838337378039567555027265124469E-2476 4.269191686890197837775136325622345E-2476
4112 2.134595843445098918887568162811173E-2476 1.067297921722549459443784081405586E-2476
4113 5.33648960861274729721892040702793E-2477 2.668244804306373648609460203513965E-2477
4114 1.334122402153186824304730101756983E-2477 6.670612010765934121523650508784913E-2478
4115 3.335306005382967060761825254392457E-2478 1.667653002691483530380912627196228E-2478
4116 8.33826501345741765190456313598114E-2479 4.16913250672870882595228156799057E-2479
4117 2.084566253364354412976140783995285E-2479 1.042283126682177206488070391997643E-2479
4118 5.211415633410886032440351959988215E-2480 2.605707816705443016220175979994108E-2480
4119 1.302853908352721508110087989997054E-2480 6.51426954176360754055043994998527E-2481
4120 3.257134770881803770275219974992635E-2481 1.628567385440901885137609987496318E-2481
4121 8.14283692720450942568804993748159E-2482 4.071418463602254712844024968740795E-2482
4122 2.035709231801127356422012484370398E-2482 1.017854615900563678211006242185199E-2482
4123 5.089273079502818391055031210925995E-2483 2.544636539751409195527515605462998E-2483
4124 1.272318269875704597763757802731499E-2483 6.361591349378522988818789013657495E-2484
4125 3.180795674689261494409394506828748E-2484 1.590397837344630747204697253414374E-2484
4126 7.95198918672315373602348626707187E-2485 3.975994593361576868011743133535935E-2485
| |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from termcolor import cprint,colored
from danpy.sb import dsb,get_terminal_width
from pendulum_eqns.init_FF_sinusoid_model import *
# Simulation horizon and time discretization shared by every run below.
N_seconds = 10  # total simulated duration, in seconds
N = N_seconds*10000 + 1  # sample count: 10 kHz sampling plus the initial time point
Time = np.linspace(0,N_seconds,N)  # uniformly spaced time vector over [0, N_seconds]
dt = Time[1]-Time[0]  # fixed forward-Euler integration step (1e-4 s)
def run_sim_FF_sinus_act(**kwargs):
	"""
	Runs one simulation for FEEDFORWARD SINUSOIDAL INPUT control.

	Returns (X, U): X is the (8,N) state trajectory, U the (2,N) activation
	input trajectory. If every attempt fails (exceptions while integrating),
	all-zero arrays of the same shapes are returned; callers detect failure
	by testing for all-zero output.

	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	**kwargs
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	1) Bounds - must be a (2,2) list with each row in ascending order. Default is given by Tension_Bounds.
	2) InitialAngularAcceleration - must be a float or an int. Default is 0 (starting from rest).
	3) thresh - must be an int. Default is 25.
	4) FixedInitialTension - will be passed to find_viable_initial_values and will fix the value of initial tension. Must be a (2,) numpy.ndarray. Run find_initial_tension outside of the loop for a given seed and then feed it through the pipeline.
	5) Amps - list of length 2 that has the amplitudes of sinusoidal activation trajectories, or the string "Scaled" to derive amplitudes from each attempt's initial activations.
	6) Freq - scalar value given in Hz.
	7) PhaseOffset - scalar value in [0,360).
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	**kwargs (Passed to find_viable_initial_values())
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	8) InitialAngularAcceleration - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d2r(0) (either by convention or by choice).
	9) InitialAngularSnap - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d4r(0) (either by convention or by choice).
	"""
	thresh = kwargs.get("thresh",25)
	assert type(thresh)==int, "thresh should be an int as it is the number of attempts the program should run before stopping."

	Bounds = kwargs.get("Bounds",ActivationBounds)
	assert type(Bounds)==list and np.shape(Bounds)==(2,2), "Bounds should be a list of shape (2,2)."

	Amps = kwargs.get("Amps",[0.1,0.1])
	if Amps != "Scaled":
		assert type(Amps)==list and len(Amps)==2, "Amps should be a list of length 2 with values that are within activation bounds."

	Freq = kwargs.get("Freq",1)
	assert type(Freq) in [int,float], "Freq should be an int or a float."

	PhaseOffset = kwargs.get("PhaseOffset",90)
	assert (type(PhaseOffset) in [int,float]) and (0<=PhaseOffset<360), "PhaseOffset should be an int or a float in [0,360)."

	AnotherIteration = True
	AttemptNumber = 1
	while AnotherIteration == True:
		X = np.zeros((8,N))
		InitialTension,InitialMuscleLengths,InitialActivations = \
			find_viable_initial_values(**kwargs)
		X[:,0] = [
			r(0),
			dr(0),
			InitialTension[0][0],
			InitialTension[1][0],
			InitialMuscleLengths[0],
			InitialMuscleLengths[1],
			0,
			0]
		U = np.zeros((2,N))
		# BUG FIX: derive this attempt's amplitudes into a separate local
		# instead of rebinding `Amps` itself. Previously `Amps` was
		# overwritten with a numpy array on the first attempt, so every
		# retry after a failure silently reused the stale amplitudes
		# rather than rescaling from the new initial activations.
		if isinstance(Amps,str) and Amps == "Scaled":
			CurrentAmps = 0.25*InitialActivations
		else:
			CurrentAmps = Amps
		PhaseRad = PhaseOffset*(np.pi/180)  # hoist the deg->rad conversion
		# Both inputs start exactly at their initial activation (the cosine
		# offsets are zero at t=0) so the trajectory is continuous from rest.
		U[0,:] = InitialActivations[0] + CurrentAmps[0]*(np.cos(2*np.pi*Freq*Time)-1)
		U[1,:] = InitialActivations[1] + CurrentAmps[1]*(np.cos(2*np.pi*Freq*Time - PhaseRad) - np.cos(-PhaseRad))
		try:
			cprint("Attempt #" + str(int(AttemptNumber)) + ":\n", 'green')
			statusbar = dsb(0,N-1,title=run_sim_FF_sinus_act.__name__)
			# Forward-Euler integration of the 8-state muscle/pendulum model.
			for i in range(N-1):
				X[:,i+1] = X[:,i] + dt*np.array([	dX1_dt(X[:,i]),\
													dX2_dt(X[:,i]),\
													dX3_dt(X[:,i]),\
													dX4_dt(X[:,i]),\
													dX5_dt(X[:,i]),\
													dX6_dt(X[:,i]),\
													dX7_dt(X[:,i],U=U[:,i+1]),\
													dX8_dt(X[:,i],U=U[:,i+1])
												])
				statusbar.update(i)
			AnotherIteration = False
			return(X,U)
		except Exception:
			# Narrowed from a bare `except:` so Ctrl-C / SystemExit can
			# still interrupt the retry loop. Integration blow-ups (the
			# expected failure mode) are all Exception subclasses.
			print('\n')
			print(" "*(get_terminal_width()\
						- len("...Attempt #" + str(int(AttemptNumber)) + " Failed. "))\
					+ colored("...Attempt #" + str(int(AttemptNumber)) + " Failed. \n",'red'))
			AttemptNumber += 1
			if AttemptNumber > thresh:
				AnotherIteration=False
				return(np.zeros((8,N)),np.zeros((2,N)))
def run_N_sim_FF_sinus_act(**kwargs):
	"""
	Runs multiple simulations for FEEDFORWARD SINUSOIDAL INPUT control.

	Returns (TotalX, TotalU): arrays of shape (S,8,N) and (S,2,N) where S is
	the number of *successful* trials (failed trials — returned as all-zero
	arrays by run_sim_FF_sinus_act — are filtered out). Raises ValueError if
	every requested trial failed.

	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	**kwargs
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	0) NumberOfTrials - positive integer value.
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	**kwargs (Passed to run_sim_FF_sinus_act())
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	1) Bounds - must be a (2,2) list with each row in ascending order. Default is given by Tension_Bounds.
	2) InitialAngularAcceleration - must be a float or an int. Default is 0 (starting from rest).
	3) thresh - must be an int. Default is 25.
	4) FixedInitialTension - will be passed to find_viable_initial_values and will fix the value of initial tension. Must be a (2,) numpy.ndarray. Run find_initial_tension outside of the loop for a given seed and then feed it through the pipeline.
	5) Amps - list of length 2 that has the amplitudes of sinusoidal activation trajectories.
	6) Freq - scalar value given in Hz.
	7) PhaseOffset - scalar value in [0,360).
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	**kwargs (Passed to find_viable_initial_values())
	~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
	8) InitialAngularAcceleration - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d2r(0) (either by convention or by choice).
	9) InitialAngularSnap - must be either a numpy.float64, float, or int. Default is set to 0 to simulate starting from rest. Choice of reference trajectory *should* not matter as it is either 0 or d4r(0) (either by convention or by choice).
	"""
	NumberOfTrials = kwargs.get("NumberOfTrials",10)
	TotalX = np.zeros((NumberOfTrials,8,N))
	TotalU = np.zeros((NumberOfTrials,2,N))
	TerminalWidth = get_terminal_width()
	print("\n")
	for j in range(NumberOfTrials):
		TrialTitle = (
			" Trial "
			+ str(j+1)
			+ "/" +str(NumberOfTrials)
			+ " \n")
		print(
			" "*int(TerminalWidth/2 - len(TrialTitle)/2)
			+ colored(TrialTitle,'white',attrs=["underline","bold"])
		)
		TotalX[j],TotalU[j] = run_sim_FF_sinus_act(**kwargs)

	# A failed trial comes back as all-zero arrays; keep only the successful
	# ones. A single boolean-mask selection is O(S) and preserves order,
	# replacing the previous one-at-a-time np.delete loop which reallocated
	# the whole array on every deletion (O(S^2)).
	SuccessMask = np.array(
		[not (TotalX[j]==0).all() for j in range(NumberOfTrials)],
		dtype=bool
	)
	NumberOfSuccessfulTrials = int(SuccessMask.sum())
	if NumberOfTrials>0 and NumberOfSuccessfulTrials==0:
		raise ValueError("No Successful Trials!")
	TotalX = TotalX[SuccessMask]
	TotalU = TotalU[SuccessMask]

	print(
		"Number of Desired Runs: "
		+ str(NumberOfTrials)
		+ "\n"
		+ "Number of Successful Runs: "
		+ str(NumberOfSuccessfulTrials)
		+ "\n"
	)
	return(TotalX,TotalU)
def plot_N_sim_FF_sinus_act(t,TotalX,TotalU,**kwargs):
    """Plot summary figures for N feed-forward sinusoidal-activation trials.

    Parameters
    ----------
    t : 1-D array of time samples shared by all trials.
    TotalX : array of shape (NTrials, 8, len(t)) of state trajectories; the
        pendulum angle is assumed to be state index 0 (radians).
    TotalU : array of shape (NTrials, 2, len(t)) of muscle-activation inputs.
    **kwargs
        Return (bool, default False) -- if True, return the figure handles
            instead of calling plt.show().
        ReturnError (bool, default False) -- if True, also return the
            (negated) muscle-length approximation errors from
            plot_l_m_comparison.

    Produces: fig1 (all trial angle trajectories vs. the reference r(t)),
    fig2 (tracking error vs. time), and fig3/fig4/fig5 accumulated across
    trials by plot_states / plot_inputs / plot_l_m_comparison.

    NOTE(review): relies on module-level helpers `dsb` (status bar), `r`
    (reference trajectory, radians -> plotted in degrees) and the plot_*
    helpers -- their exact contracts are not visible here.
    """
    Return = kwargs.get("Return",False)
    assert type(Return) == bool, "Return should either be True or False"
    ReturnError = kwargs.get("ReturnError",False)
    assert type(ReturnError)==bool, "ReturnError should be either True or False."
    # Figure 1: every trial's angle trace (gray) overlaid with the reference (red).
    fig1 = plt.figure(figsize = (9,7))
    fig1_title = "Underdetermined Forced-Pendulum Example"
    plt.title(fig1_title,fontsize=16,color='gray')
    statusbar = dsb(0,np.shape(TotalX)[0],title=(plot_N_sim_FF_sinus_act.__name__ + " (" + fig1_title +")"))
    for j in range(np.shape(TotalX)[0]):
        # Angle state (index 0) converted from radians to degrees.
        plt.plot(t,(TotalX[j,0,:])*180/np.pi,'0.70',lw=2)
        statusbar.update(j)
    # Reference trajectory sampled densely (1001 points) over the same span.
    plt.plot(np.linspace(0,t[-1],1001),\
            (r(np.linspace(0,t[-1],1001)))*180/np.pi,\
                'r')
    plt.xlabel("Time (s)")
    plt.ylabel("Desired Measure (Deg)")
    # Figure 2: per-trial tracking error r(t) - x0(t), in degrees.
    fig2 = plt.figure(figsize = (9,7))
    fig2_title = "Error vs. Time"
    plt.title(fig2_title)
    statusbar.reset(title=(plot_N_sim_FF_sinus_act.__name__ + " (" + fig2_title +")"))
    for j in range(np.shape(TotalX)[0]):
        plt.plot(t, (r(t)-TotalX[j,0,:])*180/np.pi,color='0.70')
        statusbar.update(j)
    plt.xlabel("Time (s)")
    plt.ylabel("Error (Deg)")
    statusbar.reset(
        title=(
            plot_N_sim_FF_sinus_act.__name__
            + " (Plotting States, Inputs, and Muscle Length Comparisons)"
        )
    )
    # Figures 3-5: created on the first trial, then each later trial is
    # drawn onto the same figures via the Figure= keyword.
    for j in range(np.shape(TotalX)[0]):
        if j == 0:
            fig3 = plot_states(t,TotalX[j],Return=True,InputString = "Muscle Activations")
            fig4 = plot_inputs(t,TotalU[j],Return=True,InputString = "Muscle Activations")
            # States 4:6 are the muscle lengths compared against the MTU approximation.
            fig5,Error = plot_l_m_comparison(
                t,TotalX[j],MuscleLengths=TotalX[j,4:6,:],
                Return=True,InputString="Muscle Activation",ReturnError=True
            )
            # Keep per-trial errors stacked along axis 0 (shape grows to (NTrials, len(t))).
            Error1 = Error[0][np.newaxis,:]
            Error2 = Error[1][np.newaxis,:]
        else:
            fig3 = plot_states(t,TotalX[j],Return=True,InputString = "Muscle Activations",\
                Figure=fig3)
            fig4 = plot_inputs(t,TotalU[j],Return=True,InputString = "Muscle Activations", \
                Figure = fig4)
            fig5,Error = plot_l_m_comparison(
                t,TotalX[j],MuscleLengths=TotalX[j,4:6,:],
                Return=True,InputString="Muscle Activation",ReturnError=True,
                Figure=fig5
            )
            Error1 = np.concatenate([Error1,Error[0][np.newaxis,:]],axis=0)
            Error2 = np.concatenate([Error2,Error[1][np.newaxis,:]],axis=0)
        statusbar.update(j)
    # Errors are negated on return -- downstream plots expect this sign
    # convention (see plot_l_m_approximation_error_vs_tendon_tension).
    if Return == True:
        if ReturnError == True:
            return([fig1,fig2,fig3,fig4,fig5],[-Error1,-Error2])
        else:
            return([fig1,fig2,fig3,fig4,fig5])
    else:
        if ReturnError == True:
            plt.show()
            return([-Error1,-Error2])
        else:
            plt.show()
def plot_l_m_approximation_error_vs_tendon_tension(t,TotalX,Error,**kwargs):
    """Plot MTU-approximation error against tendon tension for both muscles.

    Builds one 2x2 figure per muscle: [0][0] error vs. tension (with
    analytic error curves for each initial tension), [0][1] error vs. time,
    [1][0] tension vs. (negated) time, [1][1] the error formula rendered in
    LaTeX.

    Parameters
    ----------
    t : time vector (unused directly; module-level `Time` is plotted).
    TotalX : array (NTrials, 8, len(t)); states 2 and 3 are the tendon
        tensions of muscles 1 and 2.
    Error : sequence of two arrays, each (NTrials, len(t)), as returned
        (negated) by plot_N_sim_FF_sinus_act.
    **kwargs
        Return (bool, default False) -- return [fig1, fig2] instead of show().
        InitialTensions (list of (2,) ndarrays, default: first trial's
            initial tensions) -- initial tensions for the analytic curves.

    NOTE(review): depends on module-level F_MAX1/F_MAX2, lTo1/lTo2, Time,
    N_seconds, dsb and return_error_func_no_pennation -- confirm contracts
    at the definition sites.
    """
    Return = kwargs.get("Return",False)
    assert type(Return) == bool, "Return should either be True or False"
    InitialTensions = kwargs.get("InitialTensions",[TotalX[0,2:4,0]])
    assert type(InitialTensions)==list,"InitialTensions must be a list or arrays"
    assert all(np.array([str(type(el))=="<class 'numpy.ndarray'>" for el in InitialTensions])), "All elements of InitialTensions must be a numpy.ndarray."
    NumberOfTensionTrials = len(InitialTensions)
    # Tension sweep for the analytic error curves (1%-90% of max force).
    TendonTension1 = np.linspace(0.01*F_MAX1,0.9*F_MAX1,1001)
    TendonTension2 = np.linspace(0.01*F_MAX2,0.9*F_MAX2,1001)
    # ----- Muscle 1 figure -----
    fig1,axes1 = plt.subplots(2,2,figsize=(10,8))
    plt.suptitle("Error from MTU Approx vs. Tendon Tension\nMuscle 1",fontsize=16)
    axes1[0][0].set_xlabel("Tendon Tension (N)")
    axes1[0][0].set_ylabel("Error (m)")
    # Pad axis limits by 10% of the data range on each side.
    axes1[0][0].set_xlim(
        TotalX[:,2,:].min()-0.1*(TotalX[:,2,:].max()-TotalX[:,2,:].min()),
        TotalX[:,2,:].max()+0.1*(TotalX[:,2,:].max()-TotalX[:,2,:].min()))
    axes1[0][0].set_ylim(
        Error[0].min()-0.1*(Error[0].max()-Error[0].min()),
        Error[0].max()+0.1*(Error[0].max()-Error[0].min()))
    axes1[0][1].set_xlabel(r"$\longrightarrow$ Time (s) $\longrightarrow$")
    axes1[0][1].set_ylim(axes1[0][0].get_ylim())
    axes1[0][1].set_yticklabels(["" for el in axes1[0][1].get_yticks()])
    axes1[1][0].set_ylabel(r"$\longleftarrow$ Time (s) $\longleftarrow$")
    axes1[1][0].set_xlim(axes1[0][0].get_xlim())
    axes1[1][0].set_xticklabels(["" for el in axes1[0][0].get_xticks()])
    axes1[1][0].yaxis.tick_right()
    axes1[1][0].yaxis.set_label_position("right")
    # Time runs downward on this panel, so ticks are negative; relabel positive.
    axes1[1][0].set_yticks(-np.array(list(range(N_seconds+1))))
    axes1[1][0].set_yticklabels([str(-el) for el in axes1[1][0].get_yticks()])
    axes1[1][1].text(0.00,0.65,
        (r'error $= \frac{\tau}{k}\cdot\ln\left(\frac{e^{T_{1}(t)/\tau} - 1}{e^{T_{1}(0)/\tau} - 1} \right )$'),fontsize=20)
    axes1[1][1].text(0.075,0.475,
        (r' - $(1 - \cos(\alpha_{1}))\left[l_{m,1}(t) - l_{m,1}(0) \right]$'), fontsize=16)
    axes1[1][1].text(0.15,0.325,
        (r'where, $\tau = F_{MAX,1}\cdot c^T \cdot k^T$'),fontsize=14)
    axes1[1][1].text(0.15,0.15,
        (r'and $k = \frac{F_{MAX,1}\cdot c^T}{l_{T_{o,1}}}$'),fontsize=14)
    axes1[1][1].axis('off')
    # ----- Muscle 2 figure -----
    fig2,axes2 = plt.subplots(2,2,figsize=(10,8))
    plt.suptitle("Error from MTU Approx vs. Tendon Tension\nMuscle 2",fontsize=16)
    axes2[0][0].set_ylabel("Error (m)")
    axes2[0][0].set_xlabel("Tendon Tension (N)")
    axes2[0][0].set_xlim(
        TotalX[:,3,:].min()-0.1*(TotalX[:,3,:].max()-TotalX[:,3,:].min()),
        TotalX[:,3,:].max()+0.1*(TotalX[:,3,:].max()-TotalX[:,3,:].min()))
    axes2[0][0].set_ylim(
        Error[1].min()-0.1*(Error[1].max()-Error[1].min()),
        Error[1].max()+0.1*(Error[1].max()-Error[1].min()))
    axes2[0][1].set_xlabel(r"$\longrightarrow$ Time (s) $\longrightarrow$")
    axes2[0][1].set_ylim(axes2[0][0].get_ylim())
    axes2[0][1].set_yticklabels(["" for el in axes2[0][1].get_yticks()])
    axes2[1][0].set_ylabel(r"$\longleftarrow$ Time (s) $\longleftarrow$")
    axes2[1][0].set_xlim(axes2[0][0].get_xlim())
    axes2[1][0].set_xticklabels(["" for el in axes2[0][0].get_xticks()])
    axes2[1][0].yaxis.tick_right()
    axes2[1][0].yaxis.set_label_position("right")
    axes2[1][0].set_yticks(-np.array(list(range(N_seconds+1))))
    # BUG FIX: previously read the ticks of axes1[1][0] (muscle 1's figure);
    # use this figure's own axis so the labels always match its ticks.
    axes2[1][0].set_yticklabels([str(-el) for el in axes2[1][0].get_yticks()])
    axes2[1][1].text(0.00,0.65,
        (r'error $= \frac{\tau}{k}\cdot\ln\left(\frac{e^{T_{2}(t)/\tau} - 1}{e^{T_{2}(0)/\tau} - 1} \right )$'),fontsize=20)
    axes2[1][1].text(0.075,0.475,
        (r' - $(1 - \cos(\alpha_{2}))\left[l_{m,2}(t) - l_{m,2}(0) \right]$'), fontsize=16)
    axes2[1][1].text(0.15,0.325,
        (r'where, $\tau = F_{MAX,2}\cdot c^T \cdot k^T$'),fontsize=14)
    axes2[1][1].text(0.15,0.15,
        (r'and $k = \frac{F_{MAX,2}\cdot c^T}{l_{T_{o,2}}}$'),fontsize=14)
    axes2[1][1].axis('off')
    # Analytic error curves, one per initial-tension setting; gray level
    # encodes how far the initial tension is from F_MAX.
    for i in range(NumberOfTensionTrials):
        error_function_1 = return_error_func_no_pennation(InitialTensions[i][0],F_MAX1,lTo1)
        error_function_2 = return_error_func_no_pennation(InitialTensions[i][1],F_MAX2,lTo2)
        Error1 = error_function_1(TendonTension1)
        Error2 = error_function_2(TendonTension2)
        axes1[0][0].plot(TendonTension1,Error1,str(1-InitialTensions[i][0]/F_MAX1),lw=2)
        axes2[0][0].plot(TendonTension2,Error2,str(1-InitialTensions[i][1]/F_MAX2),lw=2)
    # Overlay each simulated trial's measured error.
    statusbar = dsb(0,np.shape(TotalX)[0],
        title=plot_l_m_approximation_error_vs_tendon_tension.__name__)
    for i in range(np.shape(TotalX)[0]):
        axes1[0][0].plot(TotalX[i,2,:],Error[0][i])
        axes1[0][1].plot(Time,Error[0][i])
        axes1[1][0].plot(TotalX[i,2,:],-Time)
        axes2[0][0].plot(TotalX[i,3,:],Error[1][i])
        axes2[0][1].plot(Time,Error[1][i])
        axes2[1][0].plot(TotalX[i,3,:],-Time)
        statusbar.update(i)
    if Return == True:
        return([fig1,fig2])
    else:
        plt.show()
def plot_l_m_error_manifold(t,TotalX,Error,**kwargs):
Return = kwargs.get("Return",False)
assert type(Return) == bool, "Return should either be True or False"
InitialTensions = kwargs.get("InitialTensions",[TotalX[0,2:4,0]])
assert type(InitialTensions)==list,"InitialTensions must be a list or arrays"
assert all(np.array([str(type(el))=="<class 'numpy.ndarray'>" for el in InitialTensions])), "All elements of InitialTensions must be a numpy.ndarray."
NumberOfTensionTrials = len(InitialTensions)
fig1 = plt.figure(figsize=(10,8))
axes1_1 = fig1.add_subplot(221, | |
# TitanRover2018 -- rover/core/servers/ArduinoSocketServer/misc/NvidiaJoystick_SocketCall.py
#!/bin/sh
### BEGIN INIT INFO
# Provides: RoverMobilityServer
# Required-Start: $remote_fs $network $syslog
# Required_Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Simple script to start a program at boot
# Description: Rover Mobility Server
### END INIT INFO
from socket import *
from datetime import datetime
import time
import pygame
#import RPi.GPIO as GPIO
# Give the system (joystick/network) time to come up before initializing.
time.sleep(5)
# LED signals for status (GPIO wiring currently disabled).
#GPIO.setmode(GPIO.BCM)
#GPIO.setwarnings(False)
redLed = 18    # BCM pin for the red status LED
greenLed = 23  # BCM pin for the green status LED
blueLed = 24   # BCM pin for the blue status LED
#GPIO.setup(redLed, GPIO.OUT) #Red LED
#GPIO.setup(greenLed, GPIO.OUT) #Green LED
#GPIO.setup(blueLed, GPIO.OUT) #Blue LED
pygame.init()
# Initialize the joystick subsystem.
pygame.joystick.init()
# Arduino UDP endpoint.
address = ('192.168.1.177', 5000)
client_socket = socket(AF_INET, SOCK_DGRAM)
#client_socket.settimeout(.5)
# NOTE(review): `global` at module level is a no-op -- these statements have
# no effect; the assignments below simply create module-level variables.
# Global variables for data transmission.
global re_data
re_data = ""
global data
data = ""
# Drive motor outputs, range roughly [-127, 127].
global motor2
motor2 = 0
global motor1
motor1 = 0
# Timestamps used to require a 3-second button hold (see checkButtons).
global pauseInterval
pauseInterval = 0
global pauseQuitInterval
pauseQuitInterval = 0
global pauseFull
pauseFull = False
# Mode saved when pausing so resume restores the previous mode.
global modeWhenPaused
modeWhenPaused = ""
#global motor1, motor2, pauseInterval, pauseQuitInterval, modeWhenPause, motor_mult, arm1, arm2, joint5, joint6, joint7, mode
# Motor throttle multiplier, adjusted in 0.1 steps within (0.3, 0.9].
global motor_mult
motor_mult = .5
# Arm and joint command values.
global arm1
arm1 = 0
global arm2
arm2 = 0
global joint5
joint5 = 0
global joint6
joint6 = 0
global joint7
joint7 = 0
# Control mode: "both", "mobility", "arm", or "pause".
global mode
mode = "both"
# Handshake packet to the Arduino.
# NOTE(review): sendto() with a str payload implies Python 2; Python 3 would
# require bytes -- confirm the target interpreter.
client_socket.sendto('0,0', address)
def stop():
    """Best-effort stop: send an all-zero command to the Arduino and reset joints.

    Waits for the Arduino's "ready" handshake on the UDP socket, then sends a
    10-field all-zero payload. Any failure (timeout, socket error) is logged
    and swallowed so the caller's loop keeps running.
    """
    # BUG FIX: without this declaration the joint resets below only created
    # dead local variables instead of clearing the module-level joint state.
    global joint1, joint5, joint6, joint7
    try:
        re_data, addr = client_socket.recvfrom(2048)
        if re_data == "ready":
            # Same payload as before: ten comma-separated zeros.
            data = ','.join(['0'] * 10)
            joint1 = joint5 = joint6 = joint7 = 0
            client_socket.sendto(data, address)
            print("Sent Stop Command")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; deliberately best-effort otherwise.
        print("Failed to send Stop Command")
    return
# This function changes the color of the LED on the Pi, based off of the current mode the rover is in
# Green meaning BOTH Mobility and Arm are active
# Blue meaning ONLY Mobility is active
# Purple meaning ONLY Arm is active
# Red meaning NEITHER Mobility nor Arm is active, we are in a paused state
# Input: NONE
# Output: NONE
#def changeLedColor():
#if mode == "both":
#LED Color = Green
#GPIO.output(greenLed,GPIO.HIGH)
#GPIO.output(redLed,GPIO.LOW)
#GPIO.output(blueLed,GPIO.LOW)
#elif mode == "mobility":
#LED Color = Blue
#GPIO.output(greenLed, GPIO.LOW)
#GPIO.output(redLed, GPIO.LOW)
#GPIO.output(blueLed, GPIO.HIGH)
#elif mode == "arm":
#LED Color = Purple
#GPIO.output(greenLed, GPIO.LOW)
#GPIO.output(redLed,GPIO.HIGH)
#GPIO.output(blueLed,GPIO.HIGH)
#elif mode == "pause":
#LED Color = Red
#GPIO.output(redLed,GPIO.HIGH)
#GPIO.output(greenLed,GPIO.LOW)
#GPIO.output(blueLed,GPIO.LOW)
# This function takes in a joystick and reads the values of each joystick Axis.
# Based on the values of the joystick axis it sets the corresponding motor values
# to be sent to the ESC.
# Input: Joystick
# Output: None
def checkJoystickMovement(currentJoystick):
    """Map the joystick's analog axes onto drive-motor and arm commands.

    Axes 1/0 drive motor1/motor2 (scaled by motor_mult) when mobility is
    active; axes 2/3 drive arm1/arm2 when the arm is active. Values are
    scaled into roughly [-127, 127] for the Arduino.

    Input: currentJoystick -- an initialized pygame joystick.
    Output: None (updates module-level motor/arm globals).
    """
    global motor1, motor2, arm1, arm2
    axes = currentJoystick.get_numaxes()
    for i in range(axes):
        # BUG FIX: previously read the module-level `joystick` object,
        # silently ignoring the currentJoystick argument.
        axis = currentJoystick.get_axis(i)
        if mode == "mobility" or mode == "both":
            if i == 1:
                # Forward stick is negative in pygame, hence the sign flip.
                motor1 = -int(127 * axis * motor_mult)
            if i == 0:
                motor2 = int(127 * axis * motor_mult)
        if mode == "arm" or mode == "both":
            if i == 2:
                arm1 = int(127 * axis)
            if i == 3:
                arm2 = int(127 * axis)
# This function takes in a joystick and cycles through all of the buttons to see if any are pushed.
# If any are pushed it sets the corresponding command to the rover.
# Input: Joystick
# Output: None
# List of Buttons and what they move (Logitech Wireless Controller).
# Button 1: Joint 5.1 Rotate End Effector Left
# Button 2: Joint 4 Move Up
# Button 3: Joint 5.1 Rotate End Effector Right
# Button 4: Joint 4 Move Down
# Button 5: Joint 5.2 Close End Effector
# Button 6: Joint 5.2 Open End Effector
# Button 7: Decrease Motor Multiplier
# Button 8: Increase Motor Multiplier
# Button 9: Pause/Unpause Rover commands
# Button 10: Switch between modes
# Note: These button numbers are as they are on the controller.
# In the for loop the buttons go from 0-9 not 1-10
def checkButtons(currentJoystick):
    """Poll every button and apply joint, throttle, pause and mode commands.

    Buttons 0-5 drive joints 5/6/7 (opposite buttons give opposite signs);
    buttons 6/7 step motor_mult down/up by 0.1 within (0.3, 0.9]; button 9
    held >3 s toggles pause (restoring the prior mode on resume); button 8
    held >3 s cycles both -> mobility -> arm -> both.

    Input: currentJoystick -- an initialized pygame joystick.
    Output: None (updates module-level control globals).
    """
    # Only names actually assigned here need `global` (reads don't); the old
    # list also included a nonexistent `modeWhenPause` typo.
    global pauseInterval, pauseQuitInterval, motor_mult, joint5, joint6, joint7, mode, modeWhenPaused
    buttons = currentJoystick.get_numbuttons()
    for i in range(buttons):
        # BUG FIX: previously read the module-level `joystick` object,
        # silently ignoring the currentJoystick argument.
        # 0 = not pushed, 1 = pushed.
        button = currentJoystick.get_button(i)
        # Joint commands (arm active). Each pair maps to +/- of one joint;
        # the negative button only fires if the positive one is released.
        if mode == "both" or mode == "arm":
            if i == 1:
                joint5 = button
            elif i == 3 and joint5 == 0:
                joint5 = -button
            if i == 0:
                joint6 = button
            elif i == 2 and joint6 == 0:
                joint6 = -button
            if i == 4:
                joint7 = button
            elif i == 5 and joint7 == 0:
                joint7 = -button
        # Throttle multiplier (mobility active).
        if mode == "both" or mode == "mobility":
            if i == 6 and button == 1 and motor_mult > 0.31:
                motor_mult = motor_mult - .1
                print(motor_mult)
            if i == 7 and button == 1 and motor_mult < .9:
                motor_mult = motor_mult + .1
                print(motor_mult)
        # Pause/unpause: button 9 must be held for at least 3 seconds.
        if i == 9 and button == 1:
            if pauseQuitInterval == 0:
                pauseQuitInterval = datetime.now()
            elif (datetime.now() - pauseQuitInterval).seconds > 3:
                if mode != "pause":
                    print("Pausing Controls")
                    # Remember the mode so resume returns to it.
                    modeWhenPaused = mode
                    mode = "pause"
                    pauseQuitInterval = 0
                    stop()
                    #changeLedColor()
                elif mode == "pause":
                    print("Resuming Controls")
                    mode = modeWhenPaused
                    modeWhenPaused = ""
                    pauseQuitInterval = 0
                    #changeLedColor()
        elif i == 9 and button == 0 and pauseQuitInterval != 0:
            print("Resetting Pause Interval")
            pauseQuitInterval = 0
        # Mode cycling: button 8 held >3 s steps both -> mobility -> arm -> both.
        if i == 8 and button == 1:
            if pauseInterval == 0:
                pauseInterval = datetime.now()
            elif mode == "both":
                if (datetime.now() - pauseInterval).seconds > 3:
                    print("Switching to MOBILITY ONLY mode")
                    mode = "mobility"
                    #stop()
                    pauseInterval = 0
                    #changeLedColor()
            elif mode == "mobility":
                if (datetime.now() - pauseInterval).seconds > 3:
                    print("Switching to ARM ONLY mode")
                    #stop()
                    mode = "arm"
                    pauseInterval = 0
                    #changeLedColor()
            elif mode == "arm":
                if (datetime.now() - pauseInterval).seconds > 3:
                    print("Switching to BOTH mode")
                    #stop()
                    mode = "both"
                    pauseInterval = 0
                    #changeLedColor()
        elif i == 8 and button == 0 and pauseInterval != 0:
            print("Resetting Pause Interval")
            pauseInterval = 0
def checkHats(currentJoystick):
    """Map the joystick hat's horizontal axis onto the joint1 command.

    Only active in "arm" or "both" mode; hat left/right gives -1/+1, center
    gives 0.

    Input: currentJoystick -- an initialized pygame joystick.
    Output: None (updates module-level joint1).
    """
    # BUG FIX: joint1 was previously a dead local -- the main loop reads the
    # module-level joint1, so without `global` the hat had no effect (and
    # joint1 was never defined at module level at all).
    global joint1
    hats = currentJoystick.get_numhats()
    if mode == "arm" or mode == "both":
        for i in range(hats):
            # BUG FIX: previously read the module-level `joystick` object,
            # silently ignoring the currentJoystick argument.
            hat = currentJoystick.get_hat(i)
            if hat[0] != 0:
                joint1 = hat[0]
            else:
                joint1 = 0
#Depending on which mode we are in only the correct buttons will be checked
while(1):
#changeLedColor()
for event in pygame.event.get(): # User did something
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print("Joystick button released.")
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
# Sends Shutdown motor and arm commands if joystick is lost
if joystick_count == 0:
stop()
# For each joystick:
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
joystick.init()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
checkJoystickMovement(joystick)
checkButtons(joystick)
checkHats(joystick)
# Command to Arduino
if mode != 'pause':
print('Sending Command to Arduino')
try:
if mode == 'both':
data = str(motor1) + ',' + str(motor2) + ',' + str(arm1) + ',' + str(arm2) + ',' + str(joint1) + ',' + str(joint5) + ',' + str(joint6) + ',' + str(joint7) + ',' + '0' + ',' + '0'
elif mode | |
import os
import sys
import time
import calendar
import copy
import math
import Tkinter as Tk
import tkMessageBox
import re
import cPickle as pickle
try:
import Pmw
except ImportError:
pmwpath = "./Pmw"
sys.path.append(pmwpath)
import Pmw
# Configure
# Output directories for saved/combined TAF products.
taf_out_file = './taf_out_file/'
tafdir2 = './tafout2/'
# Pickled HRRR model data loaded elsewhere in the program.
hrrr_file = './modeldata/hrrr.pickle'
# TAF sites this editor supports.
sites = ["KCRW", "KHTS", "KPKB", "KCKB", "KEKN", "KBKW"]
# Canvas geometry (pixels): ceiling canvas is `height`, visibility canvas is
# the shorter `height_vis`.
width = 800
height = 300
height_vis = 220
axis_color = "black"
fx_color = "black"
dot_color = "#D1FFBD"
# Fractions of canvas width/height that bound the plotting area.
pct_width_raxis = 0.95
pct_width_laxis = 0.05
pct_top = 0.1
pct_bot = 0.9
pct_graph_width = pct_width_raxis - pct_width_laxis
pct_graph_height = pct_bot - pct_top
# Plot-area heights in pixels for the two canvases.
graph_height = pct_graph_height * height
graph_height_vis = pct_graph_height * height_vis
# Y-axis label values: ceilings in hundreds of feet, visibilities in miles.
heights = ["100", "030", "020", "010", "005", "001"]
visbys = ["10", "6", "3", "1", "0.5", "0.25", "0.1"]
tag1 = "theline"
# Map TAF visibility strings to numeric statute miles ('M1/4' = less than
# 1/4, 'P6' = greater than 6).
_ValidVsby = {
    '0': 0.1,
    'M1/4': 0.1,
    '1/4': 0.25,
    '1/2': 0.5,
    '3/4': 0.75,
    '1': 1.0,
    '1 1/2': 1.5,
    '2': 2.0,
    '3': 3.0,
    '4': 4.0,
    '5': 5.0,
    '6': 6.0,
    'P6': 10.0
}
# Current UTC time components at program start.
(year, month, day, jhour, jmin, jsec, wday, yday, dst) = time.gmtime()
# Ensure output directories exist before any save.
if not os.path.exists(taf_out_file):
    os.makedirs(taf_out_file)
if not os.path.exists(tafdir2):
    os.makedirs(tafdir2)
class KeyDict(dict):
    """A dict that lazily creates missing values, like collections.defaultdict.

    `default` must be a zero-argument callable (e.g. list, dict); it is
    invoked on first access to a missing key and the result is stored under
    that key. Kept as a local class (rather than defaultdict) to preserve
    the existing constructor signature.
    """
    def __init__(self, default=None):
        super(KeyDict, self).__init__()
        self.default = default

    def __missing__(self, key):
        # Invoked by dict.__getitem__ only when `key` is absent -- this
        # replaces the old __getitem__ override that did a redundant
        # `key not in self` lookup on every access.
        value = self.default()
        self[key] = value
        return value
class TafCanvas(Tk.Frame):
def __init__(self, master):
self.master = master
self.loc = self.dragged = 0
self.site = ''
self.tafbegin_group, self.tafend_group, self.begin_month, self.begin_day, self.begin_year, self.end_month, \
self.end_day, self.end_year = '', '', '', '', '', '', '', ''
# Set up dictionaries
self.rad = {}
self.saved = {}
self.test = 0
self.cloud_level = Tk.StringVar()
Tk.Frame.__init__(self, master)
label_cig = Tk.Label(self, text="Ceiling")
label_cig.pack(pady=10)
canvas = Tk.Canvas(self, width=800, height=300, relief=Tk.RIDGE, background="white", borderwidth=1)
self.c = canvas
# Cig Canvas #############
# top and bottom
self.c.create_line(pct_width_laxis * width, pct_top * height, pct_width_raxis * width, pct_top * height,
fill=axis_color)
self.c.create_line(pct_width_laxis * width, pct_bot * height, pct_width_raxis * width, pct_bot * height,
fill=axis_color)
# left and right
self.c.create_line(pct_width_laxis * width, pct_top * height, pct_width_laxis * width, pct_bot * height,
fill=axis_color)
self.c.create_line(pct_width_raxis * width, pct_top * height, pct_width_raxis * width, pct_bot * height,
fill=axis_color)
x, y2 = self.graph_coord_from_taf(0, 3000)
x, y3 = self.graph_coord_from_taf(0, 1000)
x, y4 = self.graph_coord_from_taf(0, 500)
x, y5 = self.graph_coord_from_taf(0, 100)
# VFR
self.c.create_rectangle(pct_width_laxis * width, pct_top * height, pct_width_raxis * width, y2, fill="#A9FFA6")
# MVFR
self.c.create_rectangle(
pct_width_laxis * width,
y2,
pct_width_raxis * width,
y3,
fill="#FFFF58")
# IFR
self.c.create_rectangle(
pct_width_laxis * width,
y3,
pct_width_raxis * width,
y4,
fill="#FFCC9D")
# LIFR
self.c.create_rectangle(
pct_width_laxis * width,
y4,
pct_width_raxis * width,
y5,
fill="#FFD1F3")
(year, month, taf_day, taf_hour, tmin, jsec, wday, yday, dst) = time.gmtime()
if 18 <= taf_hour <= 23:
self.taf_package_hour = 18
elif 12 <= taf_hour < 18:
self.taf_package_hour = 12
elif 6 <= taf_hour < 12:
self.taf_package_hour = 6
elif 0 <= taf_hour < 6:
self.taf_package_hour = 0
self.tafpackage = (year, month, taf_day, self.taf_package_hour, 0, 0, wday, yday, dst)
print "TAF HOUR:", taf_hour, self.taf_package_hour
# Grid Labels
for i in range(25):
(year, month, taf_day, taf_hour, tmin, jsec, wday, yday, dst) = \
time.gmtime(calendar.timegm(self.tafpackage) + i * 3600)
print year, month, taf_day, taf_hour, tmin, jsec, wday, yday, dst
# Time and vertical lines/X-axis Legend
self.c.create_text(i * pct_graph_width * width / 24 + pct_width_laxis * width, height * 0.05, text=taf_hour,
fill=axis_color)
# Create Grid
self.c.create_line(i * pct_graph_width * width / 24 + pct_width_laxis * width, pct_top * height,
i * pct_graph_width * width / 24 + pct_width_laxis * width, pct_bot * height,
fill=axis_color, dash=(4, 4))
# Ceiling Labels
for index, i in enumerate(range(len(heights))):
j = float(heights[i]) * 100
self.c.create_text(pct_width_laxis * width - 20,
abs(graph_height - (graph_height * (math.log10(j) - 2) / 2)) + pct_top * height,
text=heights[index], fill=axis_color)
# other important thresholds
x, y = self.graph_coord_from_taf(0, 2000)
self.c.create_line(pct_width_laxis * width, y,
24 * pct_graph_width * width / 24 + pct_width_laxis * width, y, fill=axis_color, dash=(4, 4))
x, y = self.graph_coord_from_taf(0, 600)
self.c.create_line(pct_width_laxis * width, y,
24 * pct_graph_width * width / 24 + pct_width_laxis * width, y, fill=axis_color, dash=(4, 4))
canvas.pack(expand=1, fill=Tk.BOTH)
canvas.tag_bind("Ceiling", "<ButtonPress-2>", self.down)
canvas.tag_bind("Ceiling", "<ButtonRelease-2>", self.chkup)
canvas.tag_bind("Ceiling", "<Enter>", self.enter)
canvas.tag_bind("Ceiling", "<Leave>", self.leave)
self.c.bind("<Button-1>", self.draw_fx_cig)
self.c.bind("<Button-3>", self.delete_point_cig)
# VIS CANVAS
labelvis = Tk.Label(self, text="Visibility")
labelvis.pack(pady=10)
canvas_vis = Tk.Canvas(self, width=800, height=height_vis, relief=Tk.RIDGE, background="white", borderwidth=1)
self.cv = canvas_vis
# top and bottom
self.cv.create_line(pct_width_laxis * width, pct_top * height_vis, pct_width_raxis * width,
pct_top * height_vis, fill=axis_color)
self.cv.create_line(pct_width_laxis * width, pct_bot * height_vis, pct_width_raxis * width,
pct_bot * height_vis, fill=axis_color)
# left and right
self.cv.create_line(pct_width_laxis * width, pct_top * height_vis, pct_width_laxis * width,
pct_bot * height_vis, fill=axis_color)
self.cv.create_line(pct_width_raxis * width, pct_top * height_vis, pct_width_raxis * width,
pct_bot * height_vis, fill=axis_color)
x, y2 = self.graph_coord_from_taf_vis(0, 6)
x, y3 = self.graph_coord_from_taf_vis(0, 3)
x, y4 = self.graph_coord_from_taf_vis(0, 1)
x, y5 = self.graph_coord_from_taf_vis(0, 0.1)
# VFR
self.cv.create_rectangle(pct_width_laxis * width, pct_top * height_vis, pct_width_raxis * width, y2,
fill="#A9FFA6")
# MVFR
self.cv.create_rectangle(
pct_width_laxis * width,
y2,
pct_width_raxis * width,
y3,
fill="#FFFF58")
# IFR
self.cv.create_rectangle(
pct_width_laxis * width,
y3,
pct_width_raxis * width,
y4,
fill="#FFCC9D")
# LIFR
self.cv.create_rectangle(
pct_width_laxis * width,
y4,
pct_width_raxis * width,
y5,
fill="#FFD1F3")
x, y = self.graph_coord_from_taf_vis(0, 2)
self.cv.create_line(pct_width_laxis * width, y,
24 * pct_graph_width * width / 24 + pct_width_laxis * width, y, fill=axis_color,
dash=(4, 4))
x, y = self.graph_coord_from_taf_vis(0, 0.5)
self.cv.create_line(pct_width_laxis * width, y,
24 * pct_graph_width * width / 24 + pct_width_laxis * width, y, fill=axis_color,
dash=(4, 4))
x, y = self.graph_coord_from_taf_vis(0, 0.25)
self.cv.create_line(pct_width_laxis * width, y,
24 * pct_graph_width * width / 24 + pct_width_laxis * width, y, fill=axis_color,
dash=(4, 4))
# x-axis set-up
for i in range(25):
(year, month, taf_day, taf_hour, tmin, jsec, wday, yday, dst) = time.gmtime(
calendar.timegm(self.tafpackage) + i * 3600)
# time and vertical lines/X-axis Legend
self.cv.create_text(i * pct_graph_width * width / 24 + pct_width_laxis * width, height_vis * 0.05,
text=taf_hour, fill=axis_color)
self.cv.create_line(i * pct_graph_width * width / 24 + pct_width_laxis * width, pct_top * height_vis,
i * pct_graph_width * width / 24 + pct_width_laxis * width, pct_bot * height_vis,
fill=axis_color, dash=(4, 4))
# y-axis set-up
for index, i in enumerate(range(len(visbys))):
j = float(visbys[i])
test = abs(graph_height_vis - (
(graph_height_vis * math.log10(j) / 2) + graph_height_vis / 2.0)) + pct_top * height_vis
self.cv.create_text(pct_width_laxis * width - 20, test, text=visbys[index], fill=axis_color)
self.cv.pack(expand=1, fill=Tk.BOTH)
self.cv.tag_bind("Visibility", "<ButtonPress-2>", self.down_vis)
self.cv.tag_bind("Visibility", "<ButtonRelease-2>", self.chkup)
self.cv.tag_bind("Visibility", "<Enter>", self.enter)
self.cv.tag_bind("Visibility", "<Leave>", self.leave)
self.cv.bind("<Button-1>", self.draw_fx_vis)
self.cv.bind("<Button-3>", self.delete_point_vis)
# Controls ##########################
self.Frame_right = Tk.Frame(root, bd=2, relief='flat', padx=5, pady=5, width=width)
self.Frame_right.grid(row=0, column=1)
self.Frame = Tk.Frame(self.Frame_right, bd=2, relief='groove', padx=5, pady=5, width=width)
self.Frame.grid(row=1, column=0, pady=40)
self.taf_label = Tk.Label(self.Frame_right,
text="Click on graphs \nto generate TAF\n\nTAF will show up here after\n you have both visibility and ceilings.\n\n1) Left click will place points\n2) Middle click to drag/move\n3) Right click to delete",
bd=3, bg='white', relief="raised", anchor=Tk.W, justify=Tk.LEFT)
self.taf_label.grid(row=0, column=0, sticky=Tk.W, padx=10)
########################
cigarr = ["FEW", "SCT", "BKN", "OVC"]
# self.skypick = Tk.StringVar()
# self.skyoption = Pmw.OptionMenu(self.Frame,
# menubutton_textvariable = self.skypick,
# items = cigarr,
# initialitem="OVC",
# command = lambda i=0: self.draw_cig_line()
# )
# self.skyoption.grid(row=0,column=6)
# self.skypick = Tk.StringVar()
# self.skyoption = Pmw.OptionMenu(self.Frame,
# menubutton_textvariable = self.skypick,
# items = cigarr,
# initialitem="BKN",
# command = lambda i=0: self.draw_cig_line()
# )
# self.skyoption.grid(row=1,column=6)
self.skypick = Tk.StringVar()
self.skyoption = Pmw.OptionMenu(self.Frame,
menubutton_textvariable=self.skypick,
items=cigarr,
initialitem="BKN",
command=lambda i=0: self.draw_cig_line()
)
self.skyoption.grid(row=1, column=1)
####################
self.sitelabel = Tk.Label(self.Frame, text="Site:", bd=3, fg='#005306', relief="flat", anchor=Tk.W,
justify=Tk.LEFT)
self.sitelabel.grid(row=0, column=0, sticky=Tk.W, padx=10)
self.ciglabel = Tk.Label(self.Frame, text="Ceiling:", bd=3, fg='#005306', relief="flat", anchor=Tk.W,
justify=Tk.LEFT)
self.ciglabel.grid(row=1, column=0, sticky=Tk.W, padx=10)
self.wxlabel = Tk.Label(self.Frame, text="Wx:", bd=3, fg='#005306', relief="flat", anchor=Tk.W, justify=Tk.LEFT)
self.wxlabel.grid(row=3, column=0, sticky=Tk.W, padx=10)
self.wxpick = Tk.StringVar()
wxarr = ['', "FG", "BR", "FZFG", "-RA", "RA", "+RA", "-SN", "SN", "+SN", "-DZ", "DZ", "+DZ", "-TSRA", "TSRA",
"+TSRA"]
self.wxoption = Pmw.OptionMenu(self.Frame,
menubutton_textvariable=self.wxpick,
items=wxarr,
initialitem="",
command=lambda item=0: self.label_taf()
)
self.wxoption.grid(row=3, column=1)
self.wx_pick_two = Tk.StringVar()
self.wx2option = Pmw.OptionMenu(self.Frame,
menubutton_textvariable=self.wx_pick_two,
items=wxarr,
initialitem="",
command=lambda item=0: self.label_taf()
)
self.wx2option.grid(row=3, column=2)
self.sitepick = Tk.StringVar()
self.siteoption = Pmw.OptionMenu(self.Frame,
menubutton_textvariable=self.sitepick,
items=sites,
initialitem="KCRW",
command=lambda item=0: self.read_taf()
)
self.siteoption.grid(row=0, column=1)
self.save_taf = Tk.Button(self.Frame, text="Save", command=self.save_taf, bg="#DCFF92")
self.save_taf.grid(row=4, column=1, pady=20)
self.save_taf = Tk.Button(self.Frame, text="Combine TAFs", command=self.combine_taf, bg="#DCFF92")
self.save_taf.grid(row=5, column=1, pady=0)
status_label = Tk.Label(self.Frame_right, text="Status:", bd=0, relief="flat", anchor=Tk.W, justify=Tk.LEFT)
status_label.grid(row=2, column=0, sticky=Tk.W)
# Site Labels
self.sitelabels = {}
for index, site in enumerate(sites):
self.sitelabels[site] = Tk.Label(self.Frame_right, text=" " + site + " ", bd=2, fg='black', bg='#B2B4A8',
relief="raised", anchor=Tk.W, justify=Tk.LEFT)
self.sitelabels[site].grid(row=index + 3, column=0, sticky=Tk.W)
# Model Checkboxes
self.hrrr = Tk.IntVar()
| |
function, where it is stored, where it travels and how unavailability or unauthorised access, modification or deletion would adversely impact the essential function. This also applies to third parties storing or accessing data important to the operation of essential functions.", # noqa: E501
"answers": [{
"answer": "You have incomplete knowledge of what data is used by and produced in the operation of the essential function.", # noqa: E501
"score": 0
}, {
"answer": "You have not identified the important data on which your essential function relies.", # noqa: E501
"score": 0
}, {
"answer": "You have not identified who has access to data important to the operation of the essential function.", # noqa: E501
"score": 0
}, {
"answer": "You have not clearly articulated the impact of data compromise or inaccessibility.", # noqa: E501
"score": 0
}, {
"answer": "You have identified and catalogued all the data important to the operation of the essential function, or that would assist an attacker.", # noqa: E501
"score": 1
}, {
"answer": "You have identified and catalogued who has access to the data important to the operation of the essential function.", # noqa: E501
"score": 1
}, {
"answer": "You periodically review location, transmission, quantity and quality of data important to the operation of the essential function.", # noqa: E501
"score": 1
}, {
"answer": "You have identified all mobile devices and media that hold data important to the operation of the essential function.", # noqa: E501
"score": 1
}, {
"answer": "You understand and document the impact on your essential function of all relevant scenarios, including unauthorised access, modification or deletion, or when authorised users are unable to appropriately access this data.", # noqa: E501
"score": 1
}, {
"answer": "You occasionally validate these documented impact statements.", # noqa: E501
"score": 1
}, {
"answer": "You have identified and catalogued all the data important to the operation of the essential function, or that would assist an attacker.", # noqa: E501
"score": 2
}, {
"answer": "You have identified and catalogued all the data important to the operation of the essential function, or that would assist an attacker.", # noqa: E501
"score": 2
}, {
"answer": "You maintain a current understanding of the location, quantity and quality of data important to the operation of the essential function.", # noqa: E501
"score": 2
}, {
"answer": "You take steps to remove or minimise unnecessary copies or unneeded historic data.", # noqa: E501
"score": 2
}, {
"answer": "You have identified all mobile devices and media that may hold data important to the operation of the essential function.", # noqa: E501
"score": 2
}, {
"answer": "You maintain a current understanding of the data links used to transmit data that is important to your essential function.", # noqa: E501
"score": 2
}, {
"answer": "You understand the context, limitations and dependencies of your important data.", # noqa: E501
"score": 2
}, {
"answer": "You understand and document the impact on your essential function of all relevant scenarios, including unauthorised data access, modification or deletion, or when authorised users are unable to appropriately access this data.", # noqa: E501
"score": 2
}, {
"answer": "You validate these documented impact statements regularly, at least annually.", # noqa: E501
"score": 2
}]
}, {
"name": "B3b Data in Transit", # noqa: E501
"question": "You have protected the transit of data important to the operation of the essential function. This includes the transfer of data to third parties. ", # noqa: E501
"answers": [{
"answer": "You do not know what all your data links are, or which carry data important to the operation of the essential function.", # noqa: E501
"score": 0
}, {
"answer": "Data important to the operation of the essential function travels without technical protection over non-trusted or openly accessible carriers.", # noqa: E501
"score": 0
}, {
"answer": "Critical data paths that could fail, be jammed, be overloaded, etc. have no alternative path.", # noqa: E501
"score": 0
}, {
"answer": "You have identified and protected (effectively and proportionately) all the data links that carry data important to the operation of your essential function.", # noqa: E501
"score": 1
}, {
"answer": "You apply appropriate technical means (e.g. cryptography) to protect data that travels over non-trusted or openly accessible carriers, but you have limited or no confidence in the robustness of the protection applied.", # noqa: E501
"score": 1
}, {
"answer": "You have identified and protected (effectively and proportionately) all the data links that carry data important to the operation of your essential function.", # noqa: E501
"score": 2
}, {
"answer": "You apply appropriate physical or technical means to protect data that travels over non-trusted or openly accessible carriers, with justified confidence in the robustness of the protection applied.", # noqa: E501
"score": 2
}, {
"answer": "Suitable alternative transmission paths are available where there is a significant risk of impact on the operation of the essential function due to resource limitation (e.g. transmission equipment or function failure, or important data being blocked or jammed).", # noqa: E501
"score": 2
}]
}, {
"name": "B3c Stored Data", # noqa: E501
"question": "You have protected stored data important to the operation of the essential function.", # noqa: E501
"answers": [{
"answer": "You have no, or limited, knowledge of where data important to the operation of the essential function is stored.", # noqa: E501
"score": 0
}, {
"answer": "You have not protected vulnerable stored data important to the operation of the essential function in a suitable way.", # noqa: E501
"score": 0
}, {
"answer": "Backups are incomplete, untested, not adequately secured or could be inaccessible in a disaster recovery or business continuity situation.", # noqa: E501
"score": 0
}, {
"answer": "All copies of data important to the operation of your essential function are necessary. Where this important data is transferred to less secure systems, the data is provided with limited detail and/or as a read-only copy.", # noqa: E501
"score": 1
}, {
"answer": "You have applied suitable physical or technical means to protect this important stored data from unauthorised access, modification or deletion.", # noqa: E501
"score": 1
}, {
"answer": "If cryptographic protections are used, you apply suitable technical and procedural means, but you have limited or no confidence in the robustness of the protection applied.", # noqa: E501
"score": 1
}, {
"answer": "You have suitable, secured backups of data to allow the operation of the essential function to continue should the original data not be available. This may include off-line or segregated backups, or appropriate alternative forms such as paper copies.", # noqa: E501
"score": 1
}, {
"answer": "You have only necessary copies of this data. Where data is transferred to less secure systems, the data is provided with limited detail and/or as a read-only copy.", # noqa: E501
"score": 2
}, {
"answer": "You have applied suitable physical or technical means to protect this important stored data from unauthorised access, modification or deletion.", # noqa: E501
"score": 2
}, {
"answer": "If cryptographic protections are used you apply suitable technical and procedural means, and you have justified confidence in the robustness of the protection applied.", # noqa: E501
"score": 2
}, {
"answer": "You have suitable, secured backups of data to allow the operation of the essential function to continue should the original data not be available. This may include off-line or segregated backups, or appropriate alternative forms such as paper copies.", # noqa: E501
"score": 2
}, {
"answer": "Necessary historic or archive data is suitably secured in storage.", # noqa: E501
"score": 2
}]
}, {
"name": "B3d Mobile Data", # noqa: E501
"question": | |
<gh_stars>10-100
import numpy as np
import scipy.linalg as la
import cvxpy as cp
import torch
import torch.optim as optim
import argparse
import setproctitle
import os
from gym import spaces
import tqdm
import policy_models as pm
import disturb_models as dm
import robust_mpc as rmpc
from envs.random_nldi_env import RandomNLDIEnv
from envs.cartpole import CartPoleEnv
from envs.quadrotor_env import QuadrotorEnv
from envs.random_pldi_env import RandomPLDIEnv
from envs.random_hinf_env import RandomHinfEnv
from envs.microgrid import MicrogridEnv
from constants import *
from rl.ppo import PPO
from rl.rarl_ppo import RARLPPO
from rl.model import Policy
from rl.storage import RolloutStorage
from rl import trainer
from rl import arguments
from envs.rl_wrapper import RLWrapper
# import ipdb
# import sys
# from IPython.core import ultratb
# sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux', call_pdb=1)
def main():
parser = argparse.ArgumentParser(
description='Run robust control experiments.')
parser.add_argument('--baseLR', type=float, default=1e-3,
help='learning rate for non-projected DPS')
parser.add_argument('--robustLR', type=float, default=1e-4,
help='learning rate for projected DPS')
parser.add_argument('--alpha', type=float, default=0.001,
help='exponential stability coefficient')
parser.add_argument('--gamma', type=float, default=20,
help='bound on L2 gain of disturbance-to-output map (for H_inf control)')
parser.add_argument('--epochs', type=int, default=1000,
help='max epochs')
parser.add_argument('--test_frequency', type=int, default=20,
help='frequency of testing during training')
parser.add_argument('--T', type=float, default=2,
help='time horizon in seconds')
parser.add_argument('--dt', type=float, default=0.01,
help='time increment')
parser.add_argument('--testSetSz', type=int, default=50,
help='size of test set')
parser.add_argument('--holdSetSz', type=int, default=50,
help='size of holdout set')
parser.add_argument('--trainBatchSz', type=int, default=20,
help='batch size for training')
parser.add_argument('--stepType', type=str,
choices=['euler', 'RK4', 'scipy'], default='RK4',
help='method for taking steps during training')
parser.add_argument('--testStepType', type=str,
choices=['euler', 'RK4', 'scipy'], default='RK4',
help='method for taking steps during testing')
parser.add_argument('--env', type=str,
choices=['random_nldi-d0', 'random_nldi-dnonzero', 'random_pldi_env',
'random_hinf_env', 'cartpole', 'quadrotor', 'microgrid'],
default='random_nldi-d0',
help='environment')
parser.add_argument('--envRandomSeed', type=int, default=10,
help='random seed used to construct the environment')
parser.add_argument('--save', type=str,
help='prefix to add to save path')
parser.add_argument('--gpu', type=int, default=0,
help='prefix to add to save path')
parser.add_argument('--evaluate', type=str,
help='instead of training, evaluate the models from a given directory'
' (remember to use the same random seed)')
args = parser.parse_args()
dt = args.dt
save_sub = '{}+alpha{}+gamma{}+testSz{}+holdSz{}+trainBatch{}+baselr{}+robustlr{}+T{}+stepType{}+testStepType{}+seed{}+dt{}'.format(
args.env, args.alpha, args.gamma, args.testSetSz, args.holdSetSz,
args.trainBatchSz, args.baseLR, args.robustLR, args.T,
args.stepType, args.testStepType, args.envRandomSeed, dt)
if args.save is not None:
save = os.path.join('results', '{}+{}'.format(args.save, save_sub))
else:
save = os.path.join('results', save_sub)
trained_model_dir = os.path.join(save, 'trained_models')
if not os.path.exists(trained_model_dir):
os.makedirs(trained_model_dir)
setproctitle.setproctitle(save_sub)
device = torch.device('cuda:%d' % args.gpu if torch.cuda.is_available() else 'cpu')
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu)
# Setup
isD0 = (args.env == 'random_nldi-d0') or (args.env == 'quadrotor') # no u dependence in disturbance bound
problem_type = 'nldi'
if 'random_nldi' in args.env:
env = RandomNLDIEnv(isD0=isD0, random_seed=args.envRandomSeed, device=device)
elif args.env == 'random_pldi_env':
env = RandomPLDIEnv(random_seed=args.envRandomSeed, device=device)
problem_type = 'pldi'
elif args.env == 'random_hinf_env':
env = RandomHinfEnv(T=args.T, random_seed=args.envRandomSeed, device=device)
problem_type = 'hinf'
elif args.env == 'cartpole':
env = CartPoleEnv(random_seed=args.envRandomSeed, device=device)
elif args.env == 'quadrotor':
env = QuadrotorEnv(random_seed=args.envRandomSeed, device=device)
elif args.env == 'microgrid':
env = MicrogridEnv(random_seed=args.envRandomSeed, device=device)
else:
raise ValueError('No environment named %s' % args.env)
evaluate_dir = args.evaluate
evaluate = evaluate_dir is not None
# Test and holdout set of states
torch.manual_seed(17)
x_test = env.gen_states(num_states=args.testSetSz, device=device)
x_hold = env.gen_states(num_states=args.holdSetSz, device=device)
num_episode_steps = int(args.T / dt)
if problem_type == 'nldi':
A, B, G, C, D, Q, R = env.get_nldi_linearization()
state_dim = A.shape[0]
action_dim = B.shape[1]
# Get LQR solutions
Kct, Pct = get_lqr_tensors(A, B, Q, R, args.alpha, device)
Kr, Sr = get_robust_lqr_sol(*(v.cpu().numpy() for v in (A, B, G, C, D, Q, R)), args.alpha)
Krt = torch.tensor(Kr, device=device, dtype=TORCH_DTYPE)
Prt = torch.tensor(np.linalg.inv(Sr), device=device, dtype=TORCH_DTYPE)
stable_projection = pm.StableNLDIProjection(Prt, A, B, G, C, D, args.alpha, isD0)
disturb_model = dm.MultiNLDIDisturbModel(x_test.shape[0], C, D, state_dim, action_dim, env.wp)
disturb_model.to(device=device, dtype=TORCH_DTYPE)
elif problem_type == 'pldi':
A, B, Q, R = env.get_pldi_linearization()
state_dim = A.shape[1]
action_dim = B.shape[2]
# Get LQR solutions
Kct, Pct = get_lqr_tensors(A.mean(0), B.mean(0), Q, R, args.alpha, device)
Kr, Sr = get_robust_pldi_policy(*(v.cpu().numpy() for v in (A, B, Q, R)), args.alpha)
Krt = torch.tensor(Kr, device=device, dtype=TORCH_DTYPE)
Prt = torch.tensor(np.linalg.inv(Sr), device=device, dtype=TORCH_DTYPE)
stable_projection = pm.StablePLDIProjection(Prt, A, B)
disturb_model = dm.MultiPLDIDisturbModel(x_test.shape[0], state_dim, action_dim, env.L)
disturb_model.to(device=device, dtype=TORCH_DTYPE)
elif problem_type == 'hinf':
A, B, G, Q, R = env.get_hinf_linearization()
state_dim = A.shape[0]
action_dim = B.shape[1]
# Get LQR solutions
Kct, Pct = get_lqr_tensors(A, B, Q, R, args.alpha, device)
Kr, Sr, mu = get_robust_hinf_policy(*(v.cpu().numpy() for v in (A, B, G, Q, R)), args.alpha, args.gamma)
Krt = torch.tensor(Kr, device=device, dtype=TORCH_DTYPE)
Prt = torch.tensor(np.linalg.inv(Sr), device=device, dtype=TORCH_DTYPE)
stable_projection = pm.StableHinfProjection(Prt, A, B, G, Q, R, args.alpha, args.gamma, 1/mu)
disturb_model = dm.MultiHinfDisturbModel(x_test.shape[0], state_dim, action_dim, env.wp, args.T)
disturb_model.to(device=device, dtype=TORCH_DTYPE)
else:
raise ValueError('No problem type named %s' % problem_type)
adv_disturb_model = dm.MBAdvDisturbModel(env, None, disturb_model, dt, horizon=num_episode_steps//5, update_freq=num_episode_steps//20)
env.adversarial_disturb_f = adv_disturb_model
###########################################################
# LQR baselines
###########################################################
### Vanilla LQR (i.e., non-robust, exponentially stable)
pi_custom_lqr = lambda x: x @ Kct.T
adv_disturb_model.set_policy(pi_custom_lqr)
custom_lqr_perf = eval_model(x_test, pi_custom_lqr, env,
step_type=args.testStepType, T=args.T, dt=dt)
write_results(custom_lqr_perf, 'LQR', save)
custom_lqr_perf = eval_model(x_test, pi_custom_lqr, env,
step_type=args.testStepType, T=args.T, dt=dt, adversarial=True)
write_results(custom_lqr_perf, 'LQR-adv', save)
### Robust LQR
pi_robust_lqr = lambda x: x @ Krt.T
adv_disturb_model.set_policy(pi_robust_lqr)
robust_lqr_perf = eval_model(x_test, pi_robust_lqr, env,
step_type=args.testStepType, T=args.T, dt=dt)
write_results(robust_lqr_perf, 'Robust LQR', save)
robust_lqr_perf = eval_model(x_test, pi_robust_lqr, env,
step_type=args.testStepType, T=args.T, dt=dt, adversarial=True)
write_results(robust_lqr_perf, 'Robust LQR-adv', save)
###########################################################
# Model-based planning methods
###########################################################
### Non-robust MBP (starting with robust LQR solution)
pi_mbp = pm.MBPPolicy(Krt, state_dim, action_dim)
pi_mbp.to(device=device, dtype=TORCH_DTYPE)
adv_disturb_model.set_policy(pi_mbp)
if evaluate:
pi_mbp.load_state_dict(torch.load(os.path.join(evaluate_dir, 'mbp.pt')))
else:
pi_mbp_dict, train_losses, hold_losses, test_losses, test_losses_adv, stop_epoch = \
train(pi_mbp, x_test, x_hold, env,
lr=args.baseLR, batch_size=args.trainBatchSz, epochs=args.epochs, T=args.T, dt=dt, step_type=args.stepType,
test_frequency=args.test_frequency, save_dir=save, model_name='mbp', device=device)
save_results(train_losses, hold_losses, test_losses, test_losses_adv, save, 'mbp', pi_mbp_dict, epoch=stop_epoch,
is_final=True)
torch.save(pi_mbp_dict, os.path.join(trained_model_dir, 'mbp.pt'))
pi_mbp_perf = eval_model(x_test, pi_mbp, env,
step_type=args.testStepType, T=args.T, dt=dt)
write_results(pi_mbp_perf, 'MBP', save)
pi_mbp_perf = eval_model(x_test, pi_mbp, env,
step_type=args.testStepType, T=args.T, dt=dt, adversarial=True)
write_results(pi_mbp_perf, 'MBP-adv', save)
### Robust MBP (starting with robust LQR solution)
pi_robust_mbp = pm.StablePolicy(pm.MBPPolicy(Krt, state_dim, action_dim), stable_projection)
pi_robust_mbp.to(device=device, dtype=TORCH_DTYPE)
adv_disturb_model.set_policy(pi_robust_mbp)
if evaluate:
pi_robust_mbp.load_state_dict(torch.load(os.path.join(evaluate_dir, 'robust_mbp.pt')))
else:
pi_robust_mbp_dict, train_losses, hold_losses, test_losses, test_losses_adv, stop_epoch = \
train(pi_robust_mbp, x_test, x_hold, env,
lr=args.robustLR, batch_size=args.trainBatchSz, epochs=args.epochs, T=args.T, dt=dt, step_type=args.stepType,
test_frequency=args.test_frequency, save_dir=save, model_name='robust_mbp', device=device)
save_results(train_losses, hold_losses, test_losses, test_losses_adv, save, 'robust_mbp', pi_robust_mbp_dict, epoch=stop_epoch,
is_final=True)
torch.save(pi_robust_mbp_dict, os.path.join(trained_model_dir, 'robust_mbp.pt'))
pi_robust_mbp_perf = eval_model(x_test, pi_robust_mbp, env,
step_type=args.testStepType, T=args.T, dt=dt)
write_results(pi_robust_mbp_perf, 'Robust MBP', save)
pi_robust_mbp_perf = eval_model(x_test, pi_robust_mbp, env,
step_type=args.testStepType, T=args.T, dt=dt, adversarial=True)
write_results(pi_robust_mbp_perf, 'Robust MBP-adv', save)
###########################################################
# RL methods
###########################################################
if 'random_nldi' in args.env:
if isD0:
rmax = 1000
else:
rmax = 1000
elif args.env == 'random_pldi_env':
rmax = 10
elif args.env == 'random_hinf_env':
rmax = 1000
elif args.env == 'cartpole':
rmax = 10
elif args.env == 'quadrotor':
rmax = 1000
elif args.env == 'microgrid':
rmax = 10
else:
raise ValueError('No environment named %s' % args.env)
rl_args = arguments.get_args()
linear_controller_K = Krt
linear_controller_P = Prt
linear_transform = lambda u, x: u + x @ linear_controller_K.T
### Vanilla and robust PPO
base_ppo_perfs = []
base_ppo_adv_perfs = []
robust_ppo_perfs = []
robust_ppo_adv_perfs = []
for seed in range(1):
for robust in [False, True]:
torch.manual_seed(seed)
if robust:
# stable_projection = pm.StableNLDIProjection(linear_controller_P, A, B, G, C, D, args.alpha, isD0=isD0)
action_transform = lambda u, x: stable_projection.project_action(linear_transform(u, x), x)
else:
action_transform = linear_transform
envs = RLWrapper(env, state_dim, action_dim, gamma=rl_args.gamma,
dt=dt, rmax=rmax, step_type='RK4', action_transform=action_transform,
num_envs=rl_args.num_processes, device=device)
eval_envs = RLWrapper(env, state_dim, action_dim, gamma=rl_args.gamma,
dt=dt, rmax=rmax, step_type='RK4', action_transform=action_transform,
num_envs=args.testSetSz, device=device)
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': False})
actor_critic.to(device=device, dtype=TORCH_DTYPE)
agent = PPO(
actor_critic,
rl_args.clip_param,
rl_args.ppo_epoch,
rl_args.num_mini_batch,
rl_args.value_loss_coef,
rl_args.entropy_coef,
lr=rl_args.lr,
eps=rl_args.rms_prop_eps,
max_grad_norm=rl_args.max_grad_norm,
use_linear_lr_decay=rl_args.use_linear_lr_decay)
rollouts = RolloutStorage(num_episode_steps, rl_args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
ppo_pi = lambda x: action_transform(actor_critic.act(x, None, None, deterministic=True)[1], x)
adv_disturb_model.set_policy(ppo_pi)
if evaluate:
actor_critic.load_state_dict(torch.load(os.path.join(evaluate_dir,
'robust_ppo.pt' if robust else 'ppo.pt')))
else:
hold_costs, test_costs, adv_test_costs =\
trainer.train(agent, envs, rollouts, device, rl_args,
eval_envs=eval_envs, x_hold=x_hold, x_test=x_test, num_episode_steps=num_episode_steps,
save_dir=os.path.join(save, 'robust_ppo' if robust else 'ppo'),
save_extension='%d' % seed)
save_results(np.zeros_like(hold_costs), hold_costs, test_costs, adv_test_costs, save,
'robust_ppo' if robust else 'ppo', actor_critic.state_dict(),
epoch=rl_args.num_env_steps, is_final=True)
torch.save(actor_critic.state_dict(), os.path.join(trained_model_dir,
'robust_ppo.pt' if robust else 'ppo.pt'))
ppo_perf = eval_model(x_test, ppo_pi, env,
step_type=args.testStepType, T=args.T, dt=dt)
ppo_adv_perf = eval_model(x_test, ppo_pi, env,
step_type=args.testStepType, T=args.T, dt=dt, adversarial=True)
if robust:
robust_ppo_perfs.append(ppo_perf.item())
robust_ppo_adv_perfs.append(ppo_adv_perf.item())
else:
base_ppo_perfs.append(ppo_perf.item())
base_ppo_adv_perfs.append(ppo_adv_perf.item())
write_results(base_ppo_perfs, 'PPO', save)
write_results(robust_ppo_perfs, 'Robust PPO', save)
write_results(base_ppo_adv_perfs, 'PPO-adv', save)
write_results(robust_ppo_adv_perfs, 'Robust PPO-adv', save)
# RARL PPO baseline
adv_ppo_perfs = []
adv_ppo_adv_perfs = []
seed = 0
torch.manual_seed(seed)
action_transform = linear_transform
envs = RLWrapper(env, state_dim, action_dim, gamma=rl_args.gamma,
dt=dt, rmax=rmax, step_type='RK4', action_transform=action_transform,
num_envs=rl_args.num_processes, device=device, rarl=True)
eval_envs = RLWrapper(env, state_dim, action_dim, gamma=rl_args.gamma,
dt=dt, rmax=rmax, step_type='RK4', action_transform=action_transform,
num_envs=args.testSetSz, device=device)
protagornist_ac = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': False})
protagornist_ac.to(device=device, dtype=TORCH_DTYPE)
adversary_ac = Policy(
envs.observation_space.shape,
envs.disturb_space,
base_kwargs={'recurrent': False})
adversary_ac.to(device=device, dtype=TORCH_DTYPE)
agent = RARLPPO(
protagornist_ac,
adversary_ac,
rl_args.clip_param,
rl_args.ppo_epoch,
rl_args.num_mini_batch,
rl_args.value_loss_coef,
rl_args.entropy_coef,
lr=rl_args.lr,
eps=rl_args.rms_prop_eps,
max_grad_norm=rl_args.max_grad_norm,
use_linear_lr_decay=rl_args.use_linear_lr_decay)
action_space = spaces.Box(low=0, high=1,
shape=(envs.action_space.shape[0]+envs.disturb_space.shape[0],), dtype=NUMPY_DTYPE)
rollouts = RolloutStorage(num_episode_steps, rl_args.num_processes,
| |
+ d_a_xa - d_x_xa - d_ka_a - d_b_kb
if d >= -negligeable:
return False
# villes à déplacer
ech = []
bp = b
if bp < a:
bp = b + nb
for i in range(a, bp + 1):
ech.append(chemin[i % nb])
ech.reverse()
diff = bp - a + 1
xp = x
if xp < b:
xp += nb
for le in range(b + 1, xp + 1):
ll = le % nb
bp = (a + le - b - 1) % nb
chemin[bp] = chemin[ll]
for le in range(0, len(ech)):
chemin[(x + le - diff + 1 + nb) % nb] = ech[le]
return True
def dessin_arete_zone(chemin, taille_zone, X, Y):
    """
    Build a ``(X+1) x (Y+1)`` grid of edge lists: ``res[i][j]`` is the list
    of edge indices passing near zone ``(i, j)``; if *k* is in
    ``res[i][j]``, then the edge *(k, k+1)* crosses zone *(i, j)*.
    *X* is the number of zones horizontally, *Y* the number of zones
    vertically, and *taille_zone* is the side length of a square zone.
    """
    grid = [[[] for _y in range(Y + 1)] for _x in range(X + 1)]
    nb_villes = len(chemin)
    # zone coordinates are relative to the bounding box of the tour
    min_x = min(p[0] for p in chemin)
    min_y = min(p[1] for p in chemin)
    for idx, ville_a in enumerate(chemin):
        ville_b = chemin[(idx + 1) % nb_villes]
        cx1 = int((ville_a[0] - min_x) // taille_zone)
        cx2 = int((ville_b[0] - min_x) // taille_zone)
        cy1 = int((ville_a[1] - min_y) // taille_zone)
        cy2 = int((ville_b[1] - min_y) // taille_zone)
        # rasterise the segment onto the zone grid and register the edge
        for zx, zy in draw_line(cx1, cy1, cx2, cy2):
            grid[zx][zy].append(idx)
    return grid
def voisinage_zone_xy(x, y, X, Y):
    """
    Return the list of zones neighbouring zone *(x, y)* — including
    *(x, y)* itself — on a grid with *X* zones along the horizontal axis
    and *Y* zones along the vertical axis.
    """
    # offsets listed in the order the neighbours must be returned:
    # the four sides first, then the four diagonals
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1),
               (-1, -1), (-1, 1), (1, -1), (1, 1))
    voisin = [(x, y)]
    for dx, dy in offsets:
        nx, ny = x + dx, y + dy
        if 0 <= nx < X and 0 <= ny < Y:
            voisin.append((nx, ny))
    return voisin
def echange_position(chemin, taille, taille_zone, X, Y, grande=0.5, fLOG=None, distance=None):
    """
    Tries to move sub-paths of length at most *taille* in order to remove
    the longest edges of the tour; at most the fraction *grande* of the
    longest edges is considered.  Returns the number of changes applied.
    *chemin* (the tour, a list of points) is modified in place.
    *X* is the number of zones horizontally,
    *Y* is the number of zones vertically,
    *taille_zone* is the side length of a square zone,
    *fLOG* is a logging function, *distance* a distance function
    between two points.
    """
    nb = len(chemin)
    def tri_arete(x, y):
        """comparator sorting edge triples by decreasing length"""
        if x[2] < y[2]:
            return 1
        elif x[2] > y[2]:
            return -1
        else:
            return 0
    tmx = min(v[0] for v in chemin)
    tmy = min(v[1] for v in chemin)
    # list of edges (i, i+1, length) sorted by decreasing length
    la = []
    for i in range(0, nb):
        im = (i + 1) % nb
        la.append((i, im, distance(chemin[i], chemin[im])))
    la = list(sorted(la, key=functools.cmp_to_key(tri_arete)))
    # zones crossed by each edge; NOTE(review): built once and not refreshed
    # even though chemin is mutated below — stale entries are tolerated
    zone = dessin_arete_zone(chemin, taille_zone, X, Y)
    # length threshold: only the longest fraction *grande* of edges is processed
    dseuil = la[int(nb * grande)][2]
    nbtout = 0
    nb_change = 0
    iarete = 0
    # retour[t] counts successful moves of sub-paths of length t
    retour = {}
    for t in range(1, taille + 1):
        retour[t] = 0
    while iarete < nb:
        nb_change = 0
        arete = la[iarete]
        iarete += 1
        x = arete[0]
        xm = arete[1]
        a = chemin[x]
        b = chemin[xm]
        d = distance(a, b)
        if d < dseuil:
            break  # edge too short; remaining edges are shorter still
        # zones crossed by the segment (a, b)
        x1, x2 = (int((a[0] - tmx) // taille_zone),
                  int((b[0] - tmx) // taille_zone))
        y1, y2 = (int((a[1] - tmy) // taille_zone),
                  int((b[1] - tmy) // taille_zone))
        ens = draw_line(x1, y1, x2, y2)
        ville = []
        for k, l in ens:
            voisin = voisinage_zone_xy(k, l, X, Y)
            for u, v in voisin:
                ville.extend(zone[u][v])
        # remove duplicates, keeping one occurrence of each edge index
        ville.sort()
        if len(ville) == 0:
            continue
        sup = []
        mx = -1
        for v in ville:
            if v == mx:
                sup.append(v)
            mx = v
        for s in sup:
            ville.remove(s)
        # study the possibilities of breaking the edge (x, xm) around the
        # cities contained in ville
        for t in range(1, taille + 1):
            for i in ville:
                # try to insert the sub-path (x - t + 1 + nb) --> x
                # in the middle of the edge i, i+1
                b = echange_position_essai(
                    chemin, (x - t + 1 + nb) % nb, x, i, False, distance=distance)
                if b:
                    nb_change += 1
                    retour[t] += 1
                    continue
                # try to insert the sub-path (xm + t - 1) --> xm
                # in the middle of the edge i, i+1
                b = echange_position_essai(
                    chemin, (xm + t - 1) % nb, xm, i, False, distance=distance)
                if b:
                    nb_change += 1
                    retour[t] += 1
                    continue
                # try to break the edge (x, xm) by inserting
                # the sub-path i --> (i + t) % nb
                b = echange_position_essai(
                    chemin, i, (i + t) % nb, x, False, distance=distance)
                if b:
                    nb_change += 1
                    retour[t] += 1
                    continue
                # same, with the sub-path reversed
                b = echange_position_essai(
                    chemin, i, (i + t) % nb, x, True, distance=distance)
                if b:
                    retour[t] += 1
                    nb_change += 1
                    continue
                # same, with the sub-path ending at i
                b = echange_position_essai(
                    chemin, (i - t + nb) % nb, i, x, False, distance=distance)
                if b:
                    nb_change += 1
                    retour[t] += 1
                    continue
                # same, ending at i and reversed
                b = echange_position_essai(
                    chemin, (i - t + nb) % nb, i, x, True, distance=distance)
                if b:
                    retour[t] += 1
                    nb_change += 1
                    continue
        nbtout += nb_change
    fLOG("nombre de déplacements %d longueur : \t %10.0f --- \t"
         % (nbtout, longueur_chemin(chemin, distance=distance)), " --- : ", retour)
    return nbtout
def supprime_croisement(chemin, taille_zone, X, Y, fLOG, distance=None):
    """
    Remove crossings between edges of the tour; returns the number of
    changes applied.  *chemin* is modified in place.
    *X* is the number of zones horizontally,
    *Y* is the number of zones vertically,
    *taille_zone* is the side length of a square zone.
    """
    nb = len(chemin)
    min_x = min(p[0] for p in chemin)
    min_y = min(p[1] for p in chemin)
    # edge -> zones lookup, built once up front (chemin is mutated below,
    # the map is deliberately not refreshed, as in the other heuristics)
    zone = dessin_arete_zone(chemin, taille_zone, X, Y)
    nbtout = 0
    for i in range(nb):
        im = (i + 1) % nb
        a = chemin[i]
        b = chemin[im]
        # zones crossed by the segment (a, b)
        cx1 = int((a[0] - min_x) // taille_zone)
        cx2 = int((b[0] - min_x) // taille_zone)
        cy1 = int((a[1] - min_y) // taille_zone)
        cy2 = int((b[1] - min_y) // taille_zone)
        # candidate edges: everything registered in the crossed zones
        # and their immediate neighbours
        candidats = []
        for zx, zy in draw_line(cx1, cy1, cx2, cy2):
            for u, v in voisinage_zone_xy(zx, zy, X, Y):
                candidats.extend(zone[u][v])
        if not candidats:
            continue
        nb_change = 0
        # deduplicate, then try a reversal at both endpoints of the edge
        for autre in sorted(set(candidats)):
            if retournement_essai(chemin, i, autre, distance=distance):
                nb_change += 1
            elif retournement_essai(chemin, im, autre, distance=distance):
                nb_change += 1
        nbtout += nb_change
    fLOG("nombre de croisements %d longueur : \t %10.0f --- \t"
         % (nbtout, longueur_chemin(chemin, distance=distance)))
    return nbtout
def amelioration_chemin(chemin, taille_zone, X, Y, taille=10, screen=None,
fLOG=None, pygame=None, max_iter=None, images=None,
distance=None):
"""
Amélioration du chemin par un algorithme simple,
utilise des retournements de taille au plus *taille*,
traite les arcs qui se croisent,
traite les grands arcs, utilise un quadrillage de taille *window*,
*X* est le nombre de zones horizontalement,
*Y* est le nombre de zones verticalement,
*taille_zone* est la longueur d'un côté du carré d'une zone.
"""
white = 255, 255, 255
if pygame is not | |
be defined within a single Note.
'''
def __init__(self, number=None):
MusicXMLElement.__init__(self)
self._tag = 'lyric'
# attributes
# can be a number for mult line or name (e.g. chorus)
self._attr['number'] = None
# entities
self.syllabic = None # begin, middle, end, or single
self.text = None
def filterLyric(self, text):
'''
Remove and fix character strings that cause problems in MusicXML
'''
# this results in incorrect encoding of </>
#text = xml.sax.saxutils.escape(text)
if (text is None):
return None
text = text.replace('>', six.unichr(62))
text = text.replace('<', six.unichr(60))
text = text.replace('&', six.unichr(38))
# need to remove hyphens; but — and similar do not work
text = text.replace('-', six.unichr(8211))
return text
def _getComponents(self):
c = []
c.append(('syllabic', self.syllabic))
# only filter when getting components
c.append(('text', self.filterLyric(self.text)))
return c
class Pitch(MusicXMLElement):
    '''MusicXML <pitch> element: a step, an optional alteration and an octave.'''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'pitch'
        # simple entities, all stored as strings
        self.step = None
        self.alter = None
        self.octave = None

    def _getComponents(self):
        """Return the (tag, value) pairs of this pitch in schema order."""
        return [
            ('step', self.step),
            ('alter', self.alter),
            ('octave', self.octave),
        ]

    def setDefaults(self):
        """Fill step and octave from the project-wide defaults."""
        self.set('step', defaults.pitchStep)
        self.set('octave', defaults.pitchOctave)
class TimeModification(MusicXMLElement):
    '''MusicXML <time-modification> element: tuplet-like alteration of a
    note's notated duration (actual vs. normal note counts).
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'time-modification'
        # simple elements
        self.actualNotes = None
        self.normalNotes = None
        self.normalType = None
        self.normalDot = None

    def _getComponents(self):
        """Return the (tag, value) pairs in schema order."""
        return [
            ('actual-notes', self.actualNotes),
            ('normal-notes', self.normalNotes),
            ('normal-type', self.normalType),
            ('normal-dot', self.normalDot),
        ]
#-------------------------------------------------------------------------------
# Harmony and components
# tags/objects defined here:
# harmony, root, kind, bass, degree, degree-value, degree-alter, degree-type
# the following tags are simple entities:
# inversion, function,
# root-step, root-alter, bass-step, bass-alter,
class Harmony(MusicXMLElementList):
    '''A harmony tag stores a root and a kind -- eventually converted to a
    music21 ChordSymbol object (not Harmony; though ChordSymbol is a
    subclass of the music21 object called Harmony, which also deals with
    figured bass and Roman numerals).
    '''
    def __init__(self):
        MusicXMLElementList.__init__(self)
        self._tag = 'harmony'
        self.rootObj = None      # Root object
        self.function = None     # a string: I, II, iii, ...
        self.kindObj = None      # Kind object
        self.inversion = None    # non-negative integer, 0 for root position
        self.bassObj = None      # Bass object
        self.degreeObj = None    # Degree object
        self.componentList = []  # list of HarmonyChord objects?
        # alternative tag names accepted when populating these attributes
        self._crossReference['kindObj'] = ['kind']
        self._crossReference['rootObj'] = ['root']
        self._crossReference['bassObj'] = ['bass']
        self._crossReference['degreeObj'] = ['degree']

    def _getComponents(self):
        """Return child components in the order required by the schema."""
        c = list(self.componentList)
        c.append(self.rootObj)
        c.append(('function', self.function))
        c.append(self.kindObj)
        c.append(('inversion', self.inversion))
        c.append(self.bassObj)
        c.append(self.degreeObj)
        return c
class Root(MusicXMLElement):
    '''The root of a harmony: a pitch given by a step and an alteration.'''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'root'
        # simple entities
        self.rootStep = None
        self.rootAlter = None

    def _getComponents(self):
        # simple entities must carry their tag name explicitly
        return [
            ('root-step', self.rootStep),
            ('root-alter', self.rootAlter),
        ]
class Bass(MusicXMLElement):
    '''The bass of a harmony: a pitch given by a step and an alteration.
    (The original docstring said "root" — copy-paste slip.)
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'bass'
        # simple entities
        self.bassStep = None
        self.bassAlter = None

    def _getComponents(self):
        # simple entities must carry their tag name explicitly
        return [
            ('bass-step', self.bassStep),
            ('bass-alter', self.bassAlter),
        ]
class Kind(MusicXMLElement):
    '''The chord quality of a harmony; common charData values are
    'dominant', 'major', etc.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'kind'
        # the type of chord, e.g. 'dominant', 'major', ...
        self.charData = None
        # 'text' describes how the kind should be spelled when symbols are
        # not used (e.g. the displayed text '7'); the remaining attributes
        # are yes/no display flags
        for attr_name in ('text', 'use-symbols', 'stack-degrees',
                          'parentheses-degrees', 'bracket-degrees'):
            self._attr[attr_name] = None
        # not added: print-style, halign, and valign attribute groups
class Degree(MusicXMLElementList):
    '''The degree type is used to add, alter, or subtract individual notes
    in the chord.
    '''
    def __init__(self):
        MusicXMLElementList.__init__(self)
        self._tag = 'degree'
        # triples of degree value, alter, type
        self.componentList = []

    def _getComponents(self):
        # return a fresh list so callers cannot mutate componentList
        return list(self.componentList)
class DegreeValue(MusicXMLElement):
    '''Stores the scale degree: 1 for root, 3 for third, etc.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'degree-value'
        self.charData = None  # stores the degree number, e.g. 1 for the root
class DegreeAlter(MusicXMLElement):
    '''Chromatic alteration of the current degree.
    '''
    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'degree-alter'
        self.charData = None  # stores semitone values: 1, -1, etc.
        # set when +/- should be displayed instead of flat/sharp
        self._attr['plus-minus'] = None
class DegreeType(MusicXMLElement):
    """Marks a degree as an addition, alteration, or subtraction
    relative to the kind of the current chord."""

    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'degree-type'
        self.charData = None  # one of 'add', 'alter', 'subtract'
#-------------------------------------------------------------------------------
class Tuplet(MusicXMLElement):
    """Stored in a Notations object; governs tuplet presentation and bracket."""

    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'tuplet'
        # presentation attributes, all optional
        for attrName in ('bracket', 'number', 'type', 'show-number',
                         'show-type', 'line-shape', 'placement'):
            self._attr[attrName] = None
# Layout elements in a print statement only apply to the
# current page, system, staff, or measure. Music that follows
# continues to take the default values from the layout included in the defaults element.
class Defaults(MusicXMLElementList):

    def __init__(self):
        '''Score-wide defaults for scaling, layout, and appearance.

        Mirrors the MusicXML ``defaults`` element, whose schema
        sequence is::

            scaling?, layout group, appearance?, music-font?,
            word-font?, lyric-font*, lyric-language*

        Return defaults.
        '''
        MusicXMLElementList.__init__(self)
        self._tag = 'defaults'
        # elements
        self.scalingObj = None
        self.layoutList = []
        self.appearanceObj = None
        self.musicFontObj = None
        self.wordFontObj = None
        self.lyricFontObj = None
        self.lyricLanguage = None
        self.componentList = []

    def _getComponents(self):
        # Order follows the MusicXML schema sequence for <defaults>.
        return [self.scalingObj] + self.layoutList + [
            self.appearanceObj,
            self.musicFontObj,
            self.wordFontObj,
            self.lyricFontObj,
            self.lyricLanguage,
        ]
class Scaling(MusicXMLElement):
    """Stores the <scaling> element: millimeters and tenths values."""

    def __init__(self):
        MusicXMLElement.__init__(self)
        self._tag = 'scaling'
        # sub-element values (rendered as child elements, not attributes)
        self.millimeters = None
        self.tenths = None

    def _getComponents(self):
        # millimeters must come BEFORE tenths in MusicXML output
        return [
            ('millimeters', self.millimeters),
            ('tenths', self.tenths),
        ]
class Print(MusicXMLElementList):

    def __init__(self):
        '''General printing parameters for a measure, from direction.mod.

        The MusicXML <print> element may contain layout elements
        (page-layout, system-layout, staff-layout, measure-layout),
        measure-numbering, and the part-name-display /
        part-abbreviation-display elements from score.mod::

            <!ELEMENT print (page-layout?, system-layout?, staff-layout*,
                measure-layout?, measure-numbering?, part-name-display?,
                part-abbreviation-display?)>
            <!ATTLIST print
                staff-spacing %tenths; #IMPLIED
                new-system %yes-no; #IMPLIED
                new-page %yes-no; #IMPLIED
                blank-page NMTOKEN #IMPLIED
                page-number CDATA #IMPLIED>

        The new-system and new-page attributes force (or suppress) a
        system or page break, normally as the first music data within a
        measure; in multi-part music they should appear at the same
        position in every part, or the results are undefined.
        page-number sets the number of a new page and is ignored unless
        new-page is "yes". Version 2.0 adds blank-page, a positive
        integer count of blank pages (which may carry credit text or
        images) to insert before the current measure; it too is ignored
        unless new-page is "yes". staff-spacing, measured in tenths of
        staff lines, is deprecated as of Version 1.1 in favor of
        staff-layout, which wins if both are present.

        Layout elements in a print statement only apply to the current
        page, system, staff, or measure; music that follows reverts to
        the values from the defaults element.

        Supported in music21:
        page-layout, system-layout, new-system, new-page, page-number.
        '''
        MusicXMLElementList.__init__(self)
        self._tag = 'print'
        # attributes
        self._attr['new-system'] = None  # yes/no
        self._attr['new-page'] = None  # yes/no
        self._attr['page-number'] = None  # number of the new page
        # elements: page-layout, system-layout, measure-layout, others
        self.componentList = []
class PageLayout(MusicXMLElementList):
    """Stores the <page-layout> element: page size plus margin objects."""

    def __init__(self):
        MusicXMLElementList.__init__(self)
        self._tag = 'page-layout'
        # simple sub-element values
        self.pageHeight = None
        self.pageWidth = None
        # componentList holds page-margins objects
        self.componentList = []
# ---------------------------------------------------------------------------
# source: google/maps/fleetengine/v1/maps-fleetengine-v1-py/maps/fleetengine_v1/types/vehicle_api.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from google.type import latlng_pb2 # type: ignore
from maps.fleetengine_v1.types import fleetengine
from maps.fleetengine_v1.types import header as mf_header
from maps.fleetengine_v1.types import vehicles as mf_vehicles
# Registers this module's message classes with proto-plus under the
# 'maps.fleetengine.v1' package; each manifest entry names a message
# class defined below in this file.
__protobuf__ = proto.module(
    package='maps.fleetengine.v1',
    manifest={
        'CreateVehicleRequest',
        'GetVehicleRequest',
        'UpdateVehicleRequest',
        'UpdateVehicleLocationRequest',
        'UpdateVehicleAttributesRequest',
        'UpdateVehicleAttributesResponse',
        'SearchVehiclesRequest',
        'SearchVehiclesResponse',
        'ListVehiclesRequest',
        'ListVehiclesResponse',
        'Waypoint',
        'VehicleMatch',
        'VehicleAttributeList',
    },
)
class CreateVehicleRequest(proto.Message):
    r"""CreateVehicle request message.

    Attributes:
        header (maps.fleetengine_v1.types.RequestHeader):
            The standard Fleet Engine request header.
        parent (str):
            Required. Must be in the format "providers/{provider}". The
            provider must be the Project ID (for example,
            sample-cloud-project) of the Google Cloud Project of which
            the calling service account is a member.
        vehicle_id (str):
            Required. Unique Vehicle ID per provider; the actual format
            and value are opaque to the Fleet Engine and determined by
            the provider.
        vehicle (maps.fleetengine_v1.types.Vehicle):
            Required. The Vehicle entity to create. vehicle_state,
            supported_trip_types, maximum_capacity and vehicle_type are
            required. Server-managed fields (name, current_trips,
            available_capacity, current_route_segment,
            current_route_segment_version, waypoints, waypoints_version,
            remaining_distance_meters, eta_to_next_waypoint,
            navigation_status) are ignored; all other fields are used
            if provided.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    header = proto.Field(proto.MESSAGE, number=1, message=mf_header.RequestHeader)
    parent = proto.Field(proto.STRING, number=3)
    vehicle_id = proto.Field(proto.STRING, number=4)
    vehicle = proto.Field(proto.MESSAGE, number=5, message=mf_vehicles.Vehicle)
class GetVehicleRequest(proto.Message):
    r"""GetVehicle request message.

    Attributes:
        header (maps.fleetengine_v1.types.RequestHeader):
            The standard Fleet Engine request header.
        name (str):
            Required. Must be in the format
            "providers/{provider}/vehicles/{vehicle}". The provider
            must be the Project ID (for example, sample-cloud-project)
            of the Google Cloud Project of which the calling service
            account is a member.
        current_route_segment_version (google.protobuf.timestamp_pb2.Timestamp):
            Minimum (exclusive) timestamp for which
            vehicle.current_route_segment is retrieved. If the route is
            unchanged since this timestamp, current_route_segment is
            omitted from the response; if unspecified, it is always
            retrieved.
        waypoints_version (google.protobuf.timestamp_pb2.Timestamp):
            Minimum (exclusive) timestamp for which vehicle.waypoints
            data is retrieved. If the data is unchanged since this
            timestamp, it is omitted from the response; if unspecified,
            it is always retrieved.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    header = proto.Field(proto.MESSAGE, number=1, message=mf_header.RequestHeader)
    name = proto.Field(proto.STRING, number=3)
    current_route_segment_version = proto.Field(
        proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp)
    waypoints_version = proto.Field(
        proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp)
class UpdateVehicleRequest(proto.Message):
    r"""UpdateVehicle request message.

    Attributes:
        header (maps.fleetengine_v1.types.RequestHeader):
            The standard Fleet Engine request header.
        name (str):
            Required. Must be in the format
            "providers/{provider}/vehicles/{vehicle}". The {provider}
            must be the Project ID (for example, sample-cloud-project)
            of the Google Cloud Project of which the calling service
            account is a member. If a name is also set in the vehicle's
            name field and name is included in the update_mask, both
            names must match; otherwise it is an error.
        vehicle (maps.fleetengine_v1.types.Vehicle):
            Required. The Vehicle entity update to apply. Fields
            managed by the Fleet Engine (current_trips,
            available_capacity, current_route_segment_version,
            waypoints_version) may not be updated, and the vehicle name
            cannot be changed.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. Field mask indicating which Vehicle fields to
            update; must contain at least one field.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    header = proto.Field(proto.MESSAGE, number=1, message=mf_header.RequestHeader)
    name = proto.Field(proto.STRING, number=3)
    vehicle = proto.Field(proto.MESSAGE, number=4, message=mf_vehicles.Vehicle)
    update_mask = proto.Field(
        proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask)
class UpdateVehicleLocationRequest(proto.Message):
    r"""UpdateVehicleLocation request message.

    Attributes:
        header (maps.fleetengine_v1.types.RequestHeader):
            The standard Fleet Engine request header.
        name (str):
            Required. Must be in the format
            "providers/{provider}/vehicles/{vehicle}. The {provider}
            must be the Project ID (for example, sample-cloud-project)
            of the Google Cloud Project of which the calling service
            account is a member.
        current_location (maps.fleetengine_v1.types.VehicleLocation):
            Required. The location to update to; the last_location and
            update_time subfields are required.
        current_state (maps.fleetengine_v1.types.VehicleState):
            Set current vehicle state to either ONLINE or OFFLINE; if
            set to UNKNOWN_VEHICLE_STATE, the vehicle state is not
            altered.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    header = proto.Field(proto.MESSAGE, number=1, message=mf_header.RequestHeader)
    name = proto.Field(proto.STRING, number=3)
    current_location = proto.Field(
        proto.MESSAGE, number=4, message=fleetengine.VehicleLocation)
    current_state = proto.Field(
        proto.ENUM, number=5, enum=mf_vehicles.VehicleState)
class UpdateVehicleAttributesRequest(proto.Message):
    r"""UpdateVehicleAttributes request message.

    Attributes:
        header (maps.fleetengine_v1.types.RequestHeader):
            The standard Fleet Engine request header.
        name (str):
            Required. Must be in the format
            "providers/{provider}/vehicles/{vehicle}. The provider must
            be the Project ID (for example, sample-cloud-project) of
            the Google Cloud Project of which the calling service
            account is a member.
        attributes (Sequence[maps.fleetengine_v1.types.VehicleAttribute]):
            Required. The attributes to update; unmentioned attributes
            are not altered or removed. At most 20 attributes; the
            combined "key:value" string length cannot exceed 256.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    header = proto.Field(proto.MESSAGE, number=1, message=mf_header.RequestHeader)
    name = proto.Field(proto.STRING, number=3)
    attributes = proto.RepeatedField(
        proto.MESSAGE, number=4, message=fleetengine.VehicleAttribute)
class UpdateVehicleAttributesResponse(proto.Message):
    r"""UpdateVehicleAttributes response message.

    Attributes:
        attributes (Sequence[maps.fleetengine_v1.types.VehicleAttribute]):
            Required. The updated full list of vehicle attributes,
            including new, altered and untouched attributes.
    """

    # NOTE: proto field numbers define the wire format and must not change.
    attributes = proto.RepeatedField(
        proto.MESSAGE, number=1, message=fleetengine.VehicleAttribute)
class SearchVehiclesRequest(proto.Message):
r"""SearchVehicles request message.
Attributes:
header (maps.fleetengine_v1.types.RequestHeader):
The standard Fleet Engine request header.
parent (str):
Required. Must be in the format
"providers/{provider}". The provider must be the
Project ID (for example, sample-cloud-project)
of the Google Cloud Project of which the service
account making this call is a member.
pickup_point (maps.fleetengine_v1.types.TerminalLocation):
Required. The pickup point to search near.
dropoff_point (maps.fleetengine_v1.types.TerminalLocation):
The customer's intended dropoff location. The field is
required if trip_types contains TripType.SHARED.
pickup_radius_meters (int):
Required. Defines the vehicle search radius
around the pickup point. Only vehicles within
the search radius will be returned. Value must
be between 400 and 10000 meters.
count (int):
Required. Specifies the maximum number of
available vehicles to return. By default, the
Fleet Engine limits the number to 50.
minimum_capacity (int):
Required. Specifies the minimum number of
passengers allowed in the vehicle. Must number
must be greater than or equal to one. The driver
is not considered in the capacity search. This
number indicates the number of passengers being
considered for a trip.
trip_types (Sequence[maps.fleetengine_v1.types.TripType]):
Required. Restricts the search to only those
vehicles that support at least one of the
specified trip types.
maximum_staleness (google.protobuf.duration_pb2.Duration):
Restricts the search to only those vehicles
that have updated their locations within the
specified duration back from now. If this field
is not set, the server uses five minutes as the
default value.
vehicle_types (Sequence[maps.fleetengine_v1.types.Vehicle.VehicleType]):
Required. Restricts the search to those
vehicles with the specified types. At least one
vehicle type must be specified.
required_attributes (Sequence[maps.fleetengine_v1.types.VehicleAttribute]):
Callers can form complex logical operations using the
requiredAttributes and requiredOneOfAttributes fields.
requiredAttributes is a list; requiredOneOfAttributes uses a
message which allows a list of lists. In combination, the
two fields allow the composition of this expression:
::
(required_attribute[0] AND required_attribute[1] AND ...)
AND
(required_one_of_attribute[0][0] OR required_one_of_attribute[0][1] OR ...)
AND
(required_one_of_attribute[1][0] OR required_one_of_attribute[1][1] OR ...)
Restricts the search to only those vehicles with the
specified attributes. This field is a conjunction/AND
operation. Your app can specify up to 100 attributes;
however, the combined key:value string length cannot exceed
1024 characters.
required_one_of_attributes (Sequence[maps.fleetengine_v1.types.VehicleAttributeList]):
Restricts the search to only those vehicles
with at least one of the specified attributes
applied to each VehicleAttributeList. Within
each list, a vehicle must match at least one of
the attributes. This field is an inclusive
disjunction/OR operation in each
VehicleAttributeList and a conjunction/AND
operation across the collection of
VehicleAttributeList.
required_one_of_attribute_sets (Sequence[maps.fleetengine_v1.types.VehicleAttributeList]):
Restricts the search to only those vehicles
with at least one | |
"""The scaled dot-product attention mechanism defined in Vaswani et al. (2017).
The attention energies are computed as dot products between the query vector
and the key vector. The query vector is scaled down by the square root of its
dimensionality. This attention function has no trainable parameters.
See arxiv.org/abs/1706.03762
"""
import math
from typing import Tuple, Callable, Union
import tensorflow as tf
from typeguard import check_argument_types
from neuralmonkey.attention.base_attention import (
BaseAttention, Attendable, get_attention_states, get_attention_mask)
from neuralmonkey.attention.namedtuples import MultiHeadLoopState
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.model.parameterized import InitializerSpecs
from neuralmonkey.nn.utils import dropout
def split_for_heads(x: tf.Tensor, n_heads: int, head_dim: int) -> tf.Tensor:
    """Reshape a 3D tensor into per-head slices for multi-head attention.

    Splits the last dimension of a ``(batch, time, dim)`` tensor and
    moves the head axis forward, producing a 4D tensor of shape
    ``(batch, n_heads, time, head_dim)``.

    Arguments:
        x: Input tensor of shape ``(batch, time, dim)``.
        n_heads: Number of attention heads.
        head_dim: Dimension of a single attention head
            (``dim / n_heads``).

    Returns:
        A 4D tensor of shape ``(batch, n_heads, time, head_dim)``.
    """
    dyn_shape = tf.shape(x)
    per_head = tf.reshape(
        tf.expand_dims(x, 2),
        [dyn_shape[0], dyn_shape[1], n_heads, head_dim])
    # Swap the time and head axes: (batch, n_heads, time, head_dim).
    return tf.transpose(per_head, perm=[0, 2, 1, 3])
def mask_energies(energies_4d: tf.Tensor,
                  mask: tf.Tensor,
                  mask_value=-1e9) -> tf.Tensor:
    """Mask invalid positions in attention energies before the softmax.

    Arguments:
        energies_4d: Energies of shape ``(batch, n_heads, time(q), time(k))``.
        mask: Float tensor of zeros and ones, shape ``(batch, time(k))``,
            marking the valid positions in the energies tensor.
        mask_value: Value substituted at masked positions. The default
            -1e9 is taken from tensor2tensor; ``-np.inf`` is avoided to
            prevent potential underflow.

    Returns:
        Energies (logits) with padded positions pushed to ``mask_value``;
        same shape as ``energies_4d``.
    """
    # Broadcast the (batch, time(k)) mask over heads and query positions.
    mask_4d = tf.expand_dims(tf.expand_dims(mask, 1), 1)
    # Energies are log probabilities, so adding a very negative value at
    # invalid positions drives their softmax probability to zero.
    return energies_4d * mask_4d + (1.0 - mask_4d) * mask_value
def mask_future(energies: tf.Tensor, mask_value=-1e9) -> tf.Tensor:
    """Mask energies above the diagonal to forbid attending to the future.

    Simulates autoregressive decoding: each query position may only
    attend to key positions up to and including itself. The mask is not
    necessary during training when true output values are used instead
    of the decoded ones.

    Arguments:
        energies: A tensor to mask.
        mask_value: Value used to mask energies.

    Returns:
        Masked energies tensor.
    """
    # A lower-triangular matrix of ones selects the allowed (past and
    # present) key positions for every query position.
    lower_triangular = tf.matrix_band_part(tf.ones_like(energies), -1, 0)
    allowed = tf.equal(lower_triangular, 1)
    # -1e9 rather than -inf, for compatibility with tensor2tensor.
    disallowed_fill = tf.fill(tf.shape(energies), mask_value)
    return tf.where(allowed, energies, disallowed_fill)
# pylint: disable=too-many-locals
# TODO split this to more functions
def attention(
        queries: tf.Tensor,
        keys: tf.Tensor,
        values: tf.Tensor,
        keys_mask: tf.Tensor,
        num_heads: int,
        dropout_callback: Callable[[tf.Tensor], tf.Tensor],
        masked: bool = False,
        use_bias: bool = False) -> Tuple[tf.Tensor, tf.Tensor]:
    """Run multi-head scaled dot-product attention.

    See arxiv.org/abs/1706.03762

    When performing multi-head attention, the queries, keys and values
    vectors are first split to sets of smaller vectors, one for each attention
    head. Next, they are transformed using a linear layer and a separate
    attention (from a corresponding head) is applied on each set of
    the transformed triple of query, key and value. The resulting contexts
    from each head are then concatenated and a linear layer is applied
    on this concatenated output. The following can be summed by following
    equations::

        MultiHead(Q, K, V) = Concat(head_1, ..., head_h) * W_o
        head_i = Attention(Q * W_Q_i, K * W_K_i, V * W_V_i)

    The scaled dot-product attention is a simple dot-product between
    the query and a transposed key vector. The result is then scaled
    using square root of the vector dimensions and a softmax layer is applied.
    Finally, the output of the softmax layer is multiplied by the value vector.
    See the following equation::

        Attention(Q, K, V) = softmax(Q * K^T / √(d_k)) * V

    Arguments:
        queries: Input queries of shape ``(batch, time(q), k_channels)``.
        keys: Input keys of shape ``(batch, time(k), k_channels)``.
        values: Input values of shape ``(batch, time(k), v_channels)``.
        keys_mask: A float Tensor for masking sequences in keys.
        num_heads: Number of attention heads.
        dropout_callback: Callable function implementing dropout.
        masked: Boolean indicating whether we want to mask future energies.
        use_bias: If True, enable bias in the attention head projections
            (for all queries, keys and values).

    Returns:
        Contexts of shape ``(batch, time(q), v_channels)`` and
        weights of shape ``(batch, time(q), time(k))``.
    """
    if num_heads <= 0:
        raise ValueError("Number of heads must be greater than zero.")
    queries_dim = queries.shape.as_list()[-1]
    keys_shape = keys.shape.as_list()
    values_shape = values.shape.as_list()
    # Query and keys should match in the last dimension
    if queries_dim != keys_shape[-1]:
        raise ValueError(
            "Queries and keys do not match in the last dimension."
            " Queries: {}, Keys: {}".format(queries_dim, keys_shape[-1]))
    if keys_shape[1] != values_shape[1]:
        raise ValueError(
            "Keys and values 'time' dimension does not match. "
            "Keys: {}, Values: {}".format(keys_shape[1], values_shape[1]))
    # Last dimension must be divisible by num_heads
    if queries_dim % num_heads != 0:
        raise ValueError(
            "Last dimension of the query ({}) should be divisible by the "
            "number of heads ({})".format(queries_dim, num_heads))
    head_dim = int(queries_dim / num_heads)
    # For multi-head attention, queries, keys and values are linearly projected
    if num_heads > 1:
        queries = tf.layers.dense(
            queries, queries_dim, use_bias=use_bias, name="query_proj")
        keys = tf.layers.dense(
            keys, queries_dim, use_bias=use_bias, name="keys_proj")
        values = tf.layers.dense(
            values, queries_dim, use_bias=use_bias, name="vals_proj")
    # Scale first (equivalent to scaling the energies by sqrt(head_dim)):
    queries_scaled = queries / math.sqrt(head_dim)
    # Reshape the k_channels dimension to the number of heads
    queries = split_for_heads(queries_scaled, num_heads, head_dim)
    keys = split_for_heads(keys, num_heads, head_dim)
    values = split_for_heads(values, num_heads, head_dim)
    # For dot-product, we use matrix multiplication
    # shape: batch, head, time(q), time(k) (k_channels is the matmul axis)
    energies = tf.matmul(queries, keys, transpose_b=True)
    # To protect the attention from looking ahead of time, we must replace the
    # energies of future keys with negative infinity
    if masked:
        energies = mask_future(energies)
    # To exclude the padded positions (those after the end of sentence),
    # we mask the attention energies given this mask.
    if keys_mask is not None:
        energies = mask_energies(energies, keys_mask)
    energies = tf.identity(energies, "energies")
    # Softmax along the last axis
    # shape: batch, head, time(q), time(k)
    weights = tf.nn.softmax(energies)
    # apply dropout to the weights (Attention Dropout)
    weights = dropout_callback(weights)
    # Weighted sum over the value vectors:
    # shape: batch, head, time(q), head_dim
    context = tf.matmul(weights, values)
    # transpose and reshape to shape [batch, time(q), v_channels]
    context_shape = tf.shape(context)
    context = tf.reshape(
        tf.transpose(context, perm=[0, 2, 1, 3]),
        [context_shape[0], context_shape[2], queries_dim])
    if num_heads > 1:
        # pylint: disable=redefined-variable-type
        # This seems like a pylint bug
        context = tf.layers.dense(
            context, queries_dim, use_bias=use_bias, name="output_proj")
        # pylint: enable=redefined-variable-type
    return context, weights
def empty_multi_head_loop_state(
        batch_size: Union[int, tf.Tensor],
        num_heads: Union[int, tf.Tensor],
        length: Union[int, tf.Tensor],
        dimension: Union[int, tf.Tensor]) -> MultiHeadLoopState:
    """Create an initial (empty) loop state for multi-head attention.

    The first (time) dimension of each tensor is zero, so the state can
    be grown step by step during the decoding loop.
    """
    contexts = tf.zeros(
        shape=[0, batch_size, dimension],
        dtype=tf.float32,
        name="contexts")
    head_weights = [
        tf.zeros(
            shape=[0, batch_size, length],
            dtype=tf.float32,
            name="distributions_head{}".format(head))
        for head in range(num_heads)]
    return MultiHeadLoopState(contexts=contexts, head_weights=head_weights)
class MultiHeadAttention(BaseAttention):
# pylint: disable=too-many-arguments
def __init__(self,
             name: str,
             n_heads: int,
             keys_encoder: Attendable,
             values_encoder: Attendable = None,
             dropout_keep_prob: float = 1.0,
             reuse: ModelPart = None,
             save_checkpoint: str = None,
             load_checkpoint: str = None,
             initializers: InitializerSpecs = None) -> None:
    """Create a multi-head attention model part.

    When ``values_encoder`` is omitted, the values are attended over
    the same encoder as the keys.
    """
    check_argument_types()
    BaseAttention.__init__(self, name, reuse, save_checkpoint,
                           load_checkpoint, initializers)

    self.n_heads = n_heads
    self.dropout_keep_prob = dropout_keep_prob
    self.keys_encoder = keys_encoder
    self.values_encoder = (
        values_encoder if values_encoder is not None else keys_encoder)

    if self.n_heads <= 0:
        raise ValueError("Number of heads must be greater than zero.")
    if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
        raise ValueError("Dropout keep prob must be inside (0,1].")

    self._variable_scope.set_initializer(tf.variance_scaling_initializer(
        mode="fan_avg", distribution="uniform"))
# pylint: enable=too-many-arguments
@tensor
def attention_keys(self) -> tf.Tensor:
    """Attention key states, taken from the keys encoder."""
    return get_attention_states(self.keys_encoder)
@tensor
def attention_mask(self) -> tf.Tensor:
    """Padding mask over the key positions, from the keys encoder."""
    return get_attention_mask(self.keys_encoder)
@tensor
def attention_values(self) -> tf.Tensor:
    """Attention value states, taken from the values encoder."""
    return get_attention_states(self.values_encoder)
def attention(self,
query: tf.Tensor,
decoder_prev_state: tf.Tensor,
decoder_input: tf.Tensor,
loop_state: MultiHeadLoopState) -> Tuple[tf.Tensor,
MultiHeadLoopState]:
"""Run a multi-head attention getting context vector for a given query.
This method is an API-wrapper for the global function 'attention'
defined in this module. Transforms a query of shape(batch, query_size)
to shape(batch, 1, query_size) and applies the attention function.
Output context has shape(batch, 1, value_size) and weights
have shape(batch, n_heads, 1, time(k)). The output is then processed
to produce output vector of contexts and the following attention
loop state.
Arguments:
query: Input query for the current decoding step
of shape(batch, query_size).
decoder_prev_state: Previous state of the decoder.
decoder_input: Input to the RNN cell of the | |
parent_window=self.main_ui)
elif self.action_type == ACTION_RECOVER_FROM_WORDS_SAFE:
device_id, cancelled = recovery_device(self.hw_type, self.hw_device_id_selected, self.word_count,
self.hw_action_use_passphrase, self.hw_action_use_pin, self.hw_action_label, parent_window=self.main_ui)
elif self.action_type == ACTION_INITIALIZE_NEW_SAFE:
device_id, cancelled = reset_device(self.hw_type, self.hw_device_id_selected, self.word_count,
self.hw_action_use_passphrase, self.hw_action_use_pin, self.hw_action_label, parent_window=self.main_ui)
elif self.action_type == ACTION_WIPE_DEVICE:
device_id, cancelled = wipe_device(self.hw_type, self.hw_device_id_selected, parent_window=self.main_ui)
else:
raise Exception('Invalid action.')
if device_id and self.hw_device_id_selected and self.hw_device_id_selected != device_id:
# if Trezor or Keepkey is wiped during the initialization then it gets a new device_id
# update the deice id in the device combobox and a list associated with it
self.hw_device_id_selected = device_id
idx = self.cboDeviceInstance.currentIndex()
if idx >= 0 and idx < len(self.hw_device_instances):
self.hw_device_instances[idx][1] = device_id
if self.hw_action_label is None:
self.hw_action_label = ''
lbl = self.hw_action_label + ' (' + device_id + ')'
self.cboDeviceInstance.setItemText(idx, lbl)
if cancelled:
self.warnMsg('Operation cancelled.')
return False
else:
self.set_next_step(STEP_FINISHED)
return True
@pyqtSlot(bool)
def on_btnNext_clicked(self, clicked):
    """Apply the current wizard step; on success move the UI forward."""
    # Dispatch table from step constant to its 'apply' handler.
    step_handlers = {
        STEP_SELECT_DEVICE_TYPE: self.apply_step_select_device_type,
        STEP_SELECT_DEVICE_INSTANCE: self.apply_step_select_device_id,
        STEP_SELECT_ACTION: self.apply_step_select_action,
        STEP_INPUT_NUMBER_OF_WORDS: self.apply_step_select_number_of_words,
        STEP_INPUT_ENTROPY: self.apply_step_input_entropy,
        STEP_INPUT_WORDS: self.apply_step_input_words,
        STEP_INPUT_HW_OPTIONS: self.apply_step_input_hw_options,
    }
    success = False
    if self.current_step == STEP_FINISHED:
        # The last step closes the dialog; nothing to apply.
        self.close()
    elif self.current_step in step_handlers:
        success = step_handlers[self.current_step]()
    else:
        raise Exception("Internal error: invalid step.")
    if success:
        self.update_current_tab()
        self.btnBack.setEnabled(True)
@pyqtSlot(bool)
def on_btnBack_clicked(self, clicked):
    """Return to the previously visited wizard step."""
    if self.current_step <= 0:
        return
    if self.current_step == STEP_FINISHED:
        self.btnNext.setText('Continue')
    if self.current_step == STEP_INPUT_ENTROPY and \
            self.action_type in (ACTION_RECOVER_FROM_ENTROPY,):
        # Clear the generated words in place (keep the list object).
        self.mnemonic_words[:] = [''] * len(self.mnemonic_words)
    self.current_step = self.step_history.pop()
    self.apply_current_step_to_ui()
    if self.current_step == 0:
        self.btnBack.setEnabled(False)
    self.update_current_tab()
def update_current_tab(self):
# display/hide controls on the current page (step), depending on the options set in prevous steps
if self.current_step == STEP_SELECT_DEVICE_TYPE:
msg_text = ''
if self.hw_type == HWType.ledger_nano_s:
msg_text = '<span><b>Important! Start your Ledger Nano S wallet in recovery mode:</b></span>' \
'<ol><li>Clear the device by selecting the \'Settings->Device->Reset all\' menu item.</li>' \
'<li>Power the device off.</li>' \
'<li>Power the device on while holding down the right-hand physical button.</li>' \
'</ol>'
if sys.platform == 'linux':
if msg_text:
msg_text += '<br>'
msg_text += '<b>Important!</b> To make hardware wallet devices visible on linux, ' \
'add the appropriate udev rules (<a href="udev_linux">see the details</a>).'
self.lblStepDeviceTypeMessage.setText(msg_text)
elif self.current_step == STEP_SELECT_DEVICE_INSTANCE:
self.lblStepDeviceInstanceMessage.setText("<b>Select which '%s' device you want to use</b>" %
HWType.get_desc(self.hw_type))
elif self.current_step == STEP_SELECT_ACTION:
if self.hw_type == HWType.ledger_nano_s:
# turn off options not applicable for ledger walltes
self.rbActRecoverWordsSafe.setDisabled(True)
self.rbActInitializeNewSeed.setDisabled(True)
self.rbActWipeDevice.setDisabled(True)
if self.rbActRecoverWordsSafe.isChecked() or self.rbActInitializeNewSeed.isChecked() or \
self.rbActWipeDevice.isChecked():
self.rbActRecoverMnemonicWords.setChecked(True)
else:
self.rbActRecoverWordsSafe.setEnabled(True)
self.rbActInitializeNewSeed.setEnabled(True)
self.rbActWipeDevice.setEnabled(True)
if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_ENTROPY):
self.lblActionTypeMessage.setText(
'<span style="color:red;font-weight:bold">This feature should only be used on offline systems '
'which will never be connected to the internet.</span>')
else:
self.lblActionTypeMessage.setText('')
elif self.current_step in (STEP_INPUT_NUMBER_OF_WORDS, STEP_INPUT_ENTROPY):
if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_WORDS_SAFE,
ACTION_INITIALIZE_NEW_SAFE):
# recovery based on mnemonic words
self.gbNumberOfMnemonicWords.setVisible(True)
self.lblStep1MnemonicWords.setVisible(True)
self.lblStep1HexEntropy.setVisible(False)
self.edtHexEntropy.setVisible(False)
self.lblStep1MnemonicWords.setText('<b>Number of words in your recovery seed</b>')
elif self.action_type == ACTION_RECOVER_FROM_ENTROPY:
# recovery based on hexadecimal entropy
self.gbNumberOfMnemonicWords.setVisible(False)
self.lblStep1MnemonicWords.setVisible(False)
self.lblStep1HexEntropy.setVisible(True)
self.edtHexEntropy.setVisible(True)
else:
raise Exception('Invalid action type')
elif self.current_step in (STEP_INPUT_WORDS,):
self.lblStepWordListMessage2.setVisible(True)
if self.action_type == ACTION_RECOVER_FROM_WORDS_CONV:
self.grid_model.set_read_only(False)
self.lblStepWordListTitle.setText('<b>Enter your recovery seed words</b>')
self.viewMnemonic.setStyleSheet('')
elif self.action_type == ACTION_RECOVER_FROM_ENTROPY:
self.grid_model.set_read_only(True)
self.lblStepWordListMessage2.setVisible(False)
self.lblStepWordListTitle.setText('<b>Below are the seed words for the provided hexadecimal entropy</b>')
self.viewMnemonic.setStyleSheet('background-color:#e6e6e6')
else:
raise Exception('Invalid action type')
# estimate words columns widths
width = self.viewMnemonic.width()
width = int((width - (2 * 40)) / 2)
self.viewMnemonic.setModel(self.grid_model)
self.viewMnemonic.setColumnWidth(0, 40)
self.viewMnemonic.setColumnWidth(1, width)
self.viewMnemonic.setColumnWidth(2, 40)
elif self.current_step == STEP_INPUT_HW_OPTIONS:
self.edtHwOptionsDeviceLabel.setPlaceholderText('My %s' % HWType.get_desc(self.hw_type))
if self.action_type in (ACTION_RECOVER_FROM_WORDS_CONV, ACTION_RECOVER_FROM_ENTROPY):
if self.entropy:
self.lblHwOptionsMessage1.setText('Your recovery seed hexadecimal entropy: ' + self.entropy.hex())
self.fraDetails.setVisible(self.hw_options_details_visible)
if self.hw_options_details_visible:
self.btnHwOptionsDetails.setText('Hide preview')
else:
self.btnHwOptionsDetails.setText('Show prewiew')
self.edtHwOptionsPIN.setVisible(self.chbHwOptionsUsePIN.isChecked())
self.btnShowPIN.setVisible(self.chbHwOptionsUsePIN.isChecked())
self.btnHwOptionsDetails.setVisible(True)
elif self.action_type in (ACTION_RECOVER_FROM_WORDS_SAFE, ACTION_INITIALIZE_NEW_SAFE):
# trezor/keepkey device will ask for pin, so we'll hide the PIN editbox
self.edtHwOptionsPIN.setVisible(False)
self.btnShowPIN.setVisible(False)
self.btnHwOptionsDetails.setVisible(False)
else:
raise Exception('Invalid action.')
if self.hw_type == HWType.ledger_nano_s:
# for Ledger Nano we have to use PIN
self.chbHwOptionsUsePIN.setChecked(True)
self.chbHwOptionsUsePIN.setEnabled(False)
self.wdgHwOptionsLedger.setVisible(self.chbHwOptionsUsePassphrase.isChecked())
self.lblHwOptionsDeviceLabel.setVisible(False)
self.edtHwOptionsDeviceLabel.setVisible(False)
else:
self.chbHwOptionsUsePIN.setEnabled(True)
self.wdgHwOptionsLedger.setVisible(False)
self.lblHwOptionsDeviceLabel.setVisible(True)
self.edtHwOptionsDeviceLabel.setVisible(True)
@pyqtSlot(bool)
def on_btnCancel_clicked(self):
self.close()
    def connect_hardware_wallet(self):
        """Open a connection to the hardware wallet via the main window.

        :return: result of main_ui.connectHardwareWallet() — presumably truthy
            on success (TODO confirm against main window implementation).
        """
        return self.main_ui.connectHardwareWallet()
def set_word_count(self, word_count, checked=True):
if checked:
self.word_count = word_count
self.grid_model.set_words_count(word_count)
    @pyqtSlot(bool)
    def on_btnGenerateSeed_clicked(self, clicked):
        """Generate a fresh recovery seed from device-provided entropy.

        Asks the connected hardware wallet for entropy sized to the selected
        word count and fills the mnemonic word grid with the derived words.
        """
        if self.connect_hardware_wallet():
            # BIP-39 entropy length in bytes for 24/18/12-word mnemonics
            ent_len = {
                24: 32,
                18: 24,
                12: 16}.get(self.word_count)
            if ent_len:
                entropy = get_entropy(self.main_ui, ent_len)
                words = self.entropy_to_mnemonic(entropy)
                # sanity check: derived word count must match the selection
                if len(words) != self.word_count:
                    raise Exception('Word count inconsistency')
                else:
                    self.set_words(words)
            else:
                raise Exception('Invalid word count.')
def entropy_to_mnemonic(self, entropy):
words = self.mnemonic.to_mnemonic(entropy)
return words.split()
def set_words(self, words):
for idx, word in enumerate(words):
if idx < len(self.mnemonic_words):
self.mnemonic_words[idx] = word
    @pyqtSlot(QPoint)
    def on_viewMnemonic_customContextMenuRequested(self, point):
        """Show the copy/paste context menu for the mnemonic words grid."""
        # translate the view-local point to screen coordinates for exec_
        self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point))
def get_cur_mnemonic_words(self):
ws = []
for idx, w in enumerate(self.mnemonic_words):
if idx >= self.word_count:
break
ws.append(w)
return ws
def on_actCopyWords_triggered(self):
ws = self.get_cur_mnemonic_words()
ws_str = '\n'.join(ws)
clipboard = QApplication.clipboard()
if clipboard:
clipboard.setText(ws_str)
def on_actPasteWords_triggered(self):
clipboard = QApplication.clipboard()
if clipboard:
ws_str = clipboard.text()
if isinstance(ws_str, str):
ws_str = ws_str.replace('\n',' ').replace('\r',' ').replace(",",' ')
ws = ws_str.split()
for idx, w in enumerate(ws):
if idx >= self.word_count:
break
self.mnemonic_words[idx] = w
self.grid_model.refresh_view()
@pyqtSlot(bool)
def on_btnHwOptionsDetails_clicked(self):
self.hw_options_details_visible = not self.hw_options_details_visible
self.update_current_tab()
@staticmethod
def bip32_descend(*args):
if len(args) == 2 and isinstance(args[1], list):
key, path = args
else:
key, path = args[0], map(int, args[1:])
for p in path:
key = bitcoin.bip32_ckd(key, p)
return key
def get_bip32_private_key(self, path_n, master_key):
priv = self.bip32_descend(master_key, path_n)
ret = bitcoin.bip32_extract_key(priv)
return ret
    def refresh_adresses_preview(self):
        """Recompute the 10-row address preview from the current seed words.

        Derives ten consecutive addresses by incrementing the last element of
        the BIP-32 path entered in the UI, using the current mnemonic words
        and passphrase. NOTE(review): the method name has a typo ('adresses')
        but is kept because the slots below call it by this exact name.
        """
        if self.mnemonic:
            bip32_path = self.edtHwOptionsBip32Path.text()
            passphrase = self.edtHwOptionsPassphrase.text()
            # normalize per BIP-39 (NFKD) so the seed matches the device's
            passphrase = self.mnemonic.normalize_string(passphrase)
            mnem_str = ' '.join(self.get_cur_mnemonic_words())
            bip32_seed = self.mnemonic.to_seed(mnem_str, passphrase)
            bip32_master_key = bitcoin.bip32_master_key(bip32_seed)
            bip32_path_n = bip32_path_string_to_n(bip32_path)
            if len(bip32_path_n) > 0:
                last_idx = bip32_path_n[-1]
                addresses = []
                for idx in range(10):
                    # derive sibling addresses by bumping the final path index
                    bip32_path_n[-1] = last_idx + idx
                    pk = self.get_bip32_private_key(bip32_path_n, bip32_master_key)
                    pubkey = bitcoin.privkey_to_pubkey(pk)
                    addr = pubkey_to_address(pubkey)
                    path_str = bip32_path_n_to_string(bip32_path_n)
                    addresses.append((path_str, addr))
                self.address_preview_model.apply_addresses(addresses)
                self.address_preview_model.refresh_view()
                self.viewAddresses.resizeColumnsToContents()
    @pyqtSlot(bool)
    def on_btnRefreshAddressesPreview_clicked(self, check):
        """Refresh the address preview when the button is clicked."""
        self.refresh_adresses_preview()
    @pyqtSlot()
    def on_edtHwOptionsPassphrase_returnPressed(self):
        """Refresh the address preview when Enter is pressed in the passphrase box."""
        self.refresh_adresses_preview()
    @pyqtSlot()
    def on_edtHwOptionsBip32Path_returnPressed(self):
        """Refresh the address preview when Enter is pressed in the BIP-32 path box."""
        self.refresh_adresses_preview()
    def load_hw_devices(self):
        """
        Load all instances of the selected hardware wallet type. If there is more than one, user has to select which
        one he is going to use.

        Populates self.hw_device_instances with [label, device_id] pairs and
        mirrors them into the device combobox (device_id is None for Ledger,
        which exposes no id over this transport).
        """
        control_trezor_keepkey_libs(self.hw_type)
        self.main_ui.disconnectHardwareWallet()  # disconnect hw if it's open in the main window
        self.hw_device_instances.clear()
        self.cboDeviceInstance.clear()
        if self.hw_type == HWType.trezor:
            # imported lazily: trezorlib is only needed for this device type
            import trezorlib.client as client
            from trezorlib.transport_hid import HidTransport
            for d in HidTransport.enumerate():
                transport = HidTransport(d)
                cl = client.TrezorClient(transport)
                lbl = cl.features.label + ' (' + cl.features.device_id + ')'
                self.hw_device_instances.append([lbl, cl.features.device_id])
                self.cboDeviceInstance.addItem(lbl)
                # release the device so it can be reopened later
                cl.clear_session()
                cl.close()
        elif self.hw_type == HWType.keepkey:
            import keepkeylib.client as client
            from keepkeylib.transport_hid import HidTransport
            for d in HidTransport.enumerate():
                transport = HidTransport(d)
                cl = client.KeepKeyClient(transport)
                lbl = cl.features.label + ' (' + cl.features.device_id + ')'
                self.hw_device_instances.append([lbl, cl.features.device_id])
                self.cboDeviceInstance.addItem(lbl)
                cl.clear_session()
                cl.close()
        elif self.hw_type == HWType.ledger_nano_s:
            from btchip.btchipComm import getDongle
            from btchip.btchipException import BTChipException
            try:
                dongle = getDongle()
                if dongle:
                    lbl = HWType.get_desc(self.hw_type)
                    # Ledger exposes no device id here, hence None
                    self.hw_device_instances.append([lbl, None])
                    self.cboDeviceInstance.addItem(lbl)
                    dongle.close()
                    del dongle
            except BTChipException as e:
                # 'No dongle found' simply means zero devices; anything else
                # is a real error and must propagate
                if e.message != 'No dongle found':
                    raise
    @pyqtSlot(bool)
    def on_device_type_changed(self, checked):
        """React to a device-type radio button toggling (only the 'checked' edge)."""
        if checked:
            self.read_device_type_from_ui()
            self.update_current_tab()
def read_device_type_from_ui(self):
if self.rbDeviceTrezor.isChecked():
self.hw_type = HWType.trezor
elif self.rbDeviceKeepkey.isChecked():
self.hw_type = HWType.keepkey
elif self.rbDeviceLedger.isChecked():
self.hw_type = HWType.ledger_nano_s
else:
self.hw_type = None
    @pyqtSlot(bool)
    def on_rbActionType_changed(self, checked):
        """React to an action-type radio button toggling (only the 'checked' edge)."""
        if checked:
            self.read_action_type_from_ui()
            self.update_current_tab()
@pyqtSlot(str)
def on_lblStepDeviceTypeMessage_linkActivated(self, link_text):
text = '<h4>To enable hardware wallet devices on your linux system execute the following commands from ' \
'the command line.</h4>' \
'<b>For Trezor hardware wallets:</b><br>' \
'<code>echo "SUBSYSTEM==\\"usb\\", ATTR{idVendor}==\\"534c\\", ATTR{idProduct}==\\"0001\\", ' \
'TAG+=\\"uaccess\\", TAG+=\\"udev-acl\\", SYMLINK+=\\"trezor%n\\"" | ' \
'sudo tee /etc/udev/rules.d/51-trezor-udev.rules<br>' \
'sudo udevadm trigger<br>'\
'sudo udevadm control --reload-rules' \
'</code><br><br>' \
'<b>For Keepkey hardware wallets:</b><br>' \
'<code>echo "SUBSYSTEM==\\"usb\\", ATTR{idVendor}==\\"2b24\\", ATTR{idProduct}==\\"0001\\", ' \
'MODE=\\"0666\\", GROUP=\\"dialout\\", SYMLINK+=\\"keepkey%n\\"" | ' \
'sudo tee /etc/udev/rules.d/51-usb-keepkey.rules'\
'<br>echo "KERNEL==\\"hidraw*\\", ATTRS{idVendor}==\\"2b24\\", ATTRS{idProduct}==\\"0001\\", ' \
'MODE=\\"0666\\", GROUP=\\"dialout\\"" | sudo tee -a /etc/udev/rules.d/51-usb-keepkey.rules<br>' \
'sudo udevadm trigger<br>'\
'sudo udevadm control --reload-rules' \
'</code><br><br>' \
'<b>For Ledger hardware wallets:</b><br>' \
'<code>echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"1b7c\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"2b7c\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee -a /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"3b7c\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee -a /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"4b7c\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee -a /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"1807\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee -a /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2581\\", ATTRS{idProduct}==\\"1808\\", ' \
'MODE=\\"0660\\", GROUP=\\"plugdev\\"" | sudo tee -a /etc/udev/rules.d/20-hw1.rules<br>' \
'echo "SUBSYSTEMS==\\"usb\\", ATTRS{idVendor}==\\"2c97\\", ATTRS{idProduct}==\\"0000\\", ' \
'MODE=\\"0660\\", | |
# Source repository: konchunas/pytezos
import json
import logging
from decimal import Decimal
from functools import lru_cache
from os.path import exists, expanduser
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urlparse
import requests
from cached_property import cached_property # type: ignore
from deprecation import deprecated # type: ignore
from pytezos.context.mixin import ContextMixin # type: ignore
from pytezos.context.mixin import ExecutionContext
from pytezos.contract.data import ContractData
from pytezos.contract.entrypoint import ContractEntrypoint
from pytezos.contract.metadata import ContractMetadata
from pytezos.contract.result import ContractCallResult
from pytezos.contract.token_metadata import ContractTokenMetadata
from pytezos.crypto.key import Key
from pytezos.jupyter import get_class_docstring
from pytezos.logging import logger
from pytezos.michelson.format import micheline_to_michelson
from pytezos.michelson.micheline import MichelsonRuntimeError
from pytezos.michelson.parse import michelson_to_micheline
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.base import generate_pydoc
from pytezos.operation.group import OperationGroup
from pytezos.rpc import ShellQuery
class ContractTokenMetadataProxy:
    """Indexable accessor for TZIP-21 contract token metadata.

    ``proxy[token_id]`` delegates to the resolver callable supplied at
    construction time.
    """
    def __init__(self, fn: Callable) -> None:
        # resolver invoked on every subscript access
        self._fn = fn
    def __getitem__(self, item):
        resolver = self._fn
        return resolver(item)
class ContractInterface(ContextMixin):
"""Proxy class for interacting with a contract."""
program: MichelsonProgram
    def __init__(self, context: ExecutionContext) -> None:
        """Resolve the contract's entrypoints and attach them as attributes.

        :param context: execution context carrying shell, key and script
        """
        super().__init__(context=context)
        self._logger = logging.getLogger(__name__)
        self._storage: Optional[ContractData] = None
        self.entrypoints = self.program.parameter.list_entrypoints()
        for entrypoint, ty in self.entrypoints.items():
            # skip: an attribute named 'token_metadata' would shadow the
            # token_metadata property defined on this class
            if entrypoint == 'token_metadata':
                continue
            attr = ContractEntrypoint(context=context, entrypoint=entrypoint)
            attr.__doc__ = generate_pydoc(ty, entrypoint)
            setattr(self, entrypoint, attr)
def __repr__(self) -> str:
res = [
super().__repr__(),
'.storage\t# access storage data at block `block_id`',
'.parameter\t# root entrypoint',
'\nEntrypoints',
*list(map(lambda x: f'.{x}()', self.entrypoints)),
'\nHelpers',
get_class_docstring(self.__class__, attr_filter=lambda x: x not in self.entrypoints),
]
return '\n'.join(res)
    def __getattr__(self, item: str) -> ContractEntrypoint:
        # only reached when regular lookup fails, i.e. `item` is not one of
        # the entrypoint attributes attached in __init__
        raise AttributeError(f'unexpected entrypoint {item}')
@staticmethod
def from_url(url: str, context: Optional[ExecutionContext] = None) -> 'ContractInterface':
"""Create contract from michelson source code available via URL
:param url: link to the Michelson file
:param context: optional execution context
:rtype: ContractInterface
"""
res = requests.get(url)
if res.status_code != 200:
raise ValueError(f'cannot fetch `{url} {res.status_code}`', res.text)
return ContractInterface.from_michelson(res.text, context)
@staticmethod
def from_file(path: str, context: Optional[ExecutionContext] = None) -> 'ContractInterface':
"""Create contract from michelson source code stored in a file.
:param path: Path to the `.tz` file
:param context: optional execution context
:rtype: ContractInterface
"""
with open(expanduser(path)) as f:
return ContractInterface.from_michelson(f.read(), context)
    @staticmethod
    def from_michelson(source: str, context: Optional[ExecutionContext] = None) -> 'ContractInterface':
        """Create contract from michelson source code.
        :param source: Michelson source code
        :param context: optional execution context
        :rtype: ContractInterface
        """
        # parse to Micheline first and delegate construction
        return ContractInterface.from_micheline(michelson_to_micheline(source), context)
    @staticmethod
    def from_micheline(expression: List[Dict[str, Any]], context: Optional[ExecutionContext] = None) -> 'ContractInterface':
        """Create contract from micheline expression.
        :param expression: [{'prim': 'parameter'}, {'prim': 'storage'}, {'prim': 'code'}]
        :param context: optional execution context
        :rtype: ContractInterface
        """
        program = MichelsonProgram.match(expression)
        # a dynamic subclass carries `program` as a class attribute, so every
        # instance of this particular contract shares the parsed program
        cls = type(ContractInterface.__name__, (ContractInterface,), dict(program=program))
        # shell/key are taken over from the supplied context, if any
        context = ExecutionContext(
            shell=context.shell if context else None,
            key=context.key if context else None,
            script=dict(code=expression),
        )
        return cls(context)
@staticmethod
def from_context(context: ExecutionContext) -> 'ContractInterface':
"""Create contract from the previously loaded context data.
:param context: execution context
:return: ContractInterface
"""
program = MichelsonProgram.load(context, with_code=True)
cls = type(ContractInterface.__name__, (ContractInterface,), dict(program=program))
return cls(context)
@classmethod
@deprecated(
deprecated_in='3.0.0',
removed_in='3.1.0',
details='use one of `from_file`, `from_michelson`, `from_micheline`, `from_url`',
)
def create_from(cls, source):
"""Create contract interface from its code.
:param source: Michelson code, filename, or Micheline JSON
:rtype: ContractInterface
"""
if isinstance(source, str):
if exists(expanduser(source)):
return ContractInterface.from_file(source)
return ContractInterface.from_michelson(source)
return ContractInterface.from_micheline(source)
    def to_micheline(self) -> List[Dict[str, Any]]:
        """Get contract script in Micheline JSON
        :return: [{'prim': 'parameter'}, {'prim': 'storage'}, {'prim': 'code'}]
        """
        return self.program.as_micheline_expr()
    def to_michelson(self) -> str:
        """Get contract listing in formatted Michelson
        :return: string with the pretty-printed contract source
        """
        return micheline_to_michelson(self.to_micheline())
def to_file(self, path: str) -> None:
"""Write contract source to a .tz file
:param path: path to the file
"""
with open(path, 'w+') as f:
f.write(self.to_michelson())
@deprecated(deprecated_in='3.0.0', removed_in='3.1.0', details='use `.storage[path][to][big_map][key]()` instead')
def big_map_get(self, path):
"""Get BigMap entry as Python object by plain key and block height.
:param path: JSON path to the key (or just key to access default BigMap location).
Use `/` to separate nodes and `::` to separate tuple args.
In any other case you'd need to escape those symbols.
:returns: object
"""
node = self.storage
for item in path.split('/'):
if len(item) == 0:
continue
if isinstance(item, str):
res = item.split('::')
item = tuple(res) if len(res) > 1 else item
node = node[item]
return node() if node else None
    def using(
        self,
        shell: Optional[Union[ShellQuery, str]] = None,
        key: Optional[Union[Key, str]] = None,
        block_id: Optional[Union[str, int]] = None,
        mode: Optional[str] = None,
        ipfs_gateway: Optional[str] = None,
    ) -> 'ContractInterface':
        """Change the block at which the current contract is inspected.
        Also, if address is undefined you can specify RPC endpoint, and private key.
        :param shell: one of 'mainnet', '***net', or RPC node uri, or instance of :class:`pytezos.rpc.shell.ShellQuery`
        :param key: base58 encoded key, path to the faucet file, alias from tezos-client, or instance of `Key`
        :param block_id: block height / hash / offset to use, default is `head`
        :param mode: whether to use `readable` or `optimized` encoding for parameters/storage/other
        :param ipfs_gateway: custom IPFS gateway to fetch metadata from
        :rtype: ContractInterface
        """
        # once the contract is bound to an on-chain address, shell/key are
        # fixed and only block/mode/gateway may be overridden
        has_address = self.context.address is not None
        return type(self)(
            self._spawn_context(
                shell=None if has_address else shell,
                key=None if has_address else key,
                address=self.context.address,
                block_id=block_id,
                mode=mode,
                ipfs_gateway=ipfs_gateway,
            )
        )
    @property
    def storage(self) -> ContractData:
        """Current storage as ContractData.

        Resolution order: explicitly assigned storage, then on-chain storage
        fetched via RPC (for deployed contracts), then a dummy value derived
        from the storage type (for not-yet-deployed contracts).
        """
        if self._storage:
            return self._storage
        elif self.address:
            # deployed contract: fetch the raw storage at the configured block
            expr = self.shell.blocks[self.context.block_id].context.contracts[self.address].storage()
            storage = self.program.storage.from_micheline_value(expr)
            storage.attach_context(self.context)
        else:
            storage = self.program.storage.dummy(self.context)
        return ContractData(self.context, storage.item, title="storage")
    @storage.setter
    def storage(self, storage: ContractData) -> None:
        """Override storage for a not-yet-deployed contract (used by origination helpers)."""
        if self.address:
            # deployed contracts always reflect on-chain storage
            raise Exception('Can\'t set storage of deployed contract')
        self._storage = storage
def storage_from_file(self, path: str) -> None:
"""Load contract storage from file
:param path: path to .tz file
"""
with open(path) as file:
expr = michelson_to_micheline(file.read())
self.storage_from_micheline(expr)
    def storage_from_micheline(self, expression) -> None:
        """Load contract storage from Micheline expression
        :param expression: Micheline expression
        """
        storage = self.program.storage.from_micheline_value(expression)
        storage.attach_context(self.context)
        # goes through the property setter, which rejects deployed contracts
        self.storage = ContractData(self.context, storage.item, title="storage")
def storage_from_michelson(self, source: str) -> None:
"""Load contract storage from Michelson code
:param source: Michelson code
"""
expr = michelson_to_micheline(source)
self.storage_from_micheline(expr)
    @cached_property
    def metadata(self) -> Optional[ContractMetadata]:
        """Get TZIP-016 contract metadata, if exists
        :rtype: ContractMetadata or None when the contract exposes no metadata URL
        """
        metadata_url = self.metadata_url
        if metadata_url is None:
            return None
        logger.info('Trying to fetch contract metadata from `%s`', metadata_url)
        parsed_url = urlparse(metadata_url)
        if parsed_url.scheme in ('http', 'https'):
            # NOTE: KT1B34qXVRfQrScEaqjjt6cJ5G8LtVFZ7fSc
            metadata = ContractMetadata.from_url(metadata_url, self.context)
        elif parsed_url.scheme == 'ipfs':
            # NOTE: KT1AFA2mwNUMNd4SsujE1YYp29vd8BZejyKW
            metadata = ContractMetadata.from_ipfs(parsed_url.netloc, self.context)
        elif parsed_url.scheme == 'tezos-storage':
            # path is either '<key>' (this contract's storage) or
            # '<address>/<key>' (another contract's storage)
            parts = parsed_url.path.split('/')
            if len(parts) == 1:
                # NOTE: KT1JBThDEqyqrEHimhxoUBCSnsKAqFcuHMkP
                storage = self.storage
            elif len(parts) == 2:
                # NOTE: KT1REEb5VxWRjcHm5GzDMwErMmNFftsE5Gpf
                context = self._spawn_context(address=parsed_url.netloc)
                storage = ContractInterface.from_context(context).storage
            else:
                raise NotImplementedError('Unknown metadata URL scheme')
            metadata_json = json.loads(storage['metadata'][parts[-1]]().decode())
            metadata = ContractMetadata.from_json(metadata_json, self.context)
        elif parsed_url.scheme == 'sha256':
            raise NotImplementedError
        else:
            raise NotImplementedError('Unknown metadata URL scheme')
        return metadata
    @property
    def token_metadata(self) -> ContractTokenMetadataProxy:
        """Get TZIP-021 contract token metadata proxy
        :rtype: ContractTokenMetadataProxy (subscribe with a token_id)
        """
        return ContractTokenMetadataProxy(self._get_token_metadata)  # type: ignore
@lru_cache(maxsize=None)
def _get_token_metadata(self, token_id: int) -> Optional[ContractTokenMetadata]:
token_metadata = self._get_token_metadata_from_view(token_id)
if token_metadata is None:
token_metadata = self._get_token_metadata_from_storage(token_id)
return token_metadata
    def _get_token_metadata_from_storage(self, token_id: int) -> Optional[ContractTokenMetadata]:
        """Resolve token metadata via the `token_metadata` big map in storage.

        :param token_id: token id to look up
        :returns: ContractTokenMetadata, or None when storage holds no URL
        """
        self._logger.info('Trying to fetch token %s metadata from storage', token_id)
        try:
            # TZIP-12: the empty-string key of token_info holds the metadata URI
            token_metadata_url = self.storage['token_metadata'][token_id]['token_info']['']().decode()
        # FIXME: Dirty
        except (KeyError, AssertionError):
            self._logger.info('Storage doesn\'t contain metadata URL for token %s', token_id)
            return None
        self._logger.info('Trying to fetch contract token metadata from `%s`', token_metadata_url)
        parsed_url = urlparse(token_metadata_url)
        if parsed_url.scheme in ('http', 'https'):
            token_metadata = ContractTokenMetadata.from_url(token_metadata_url, self.context)
        elif parsed_url.scheme == 'ipfs':
            token_metadata = ContractTokenMetadata.from_ipfs(parsed_url.netloc, self.context)
        elif parsed_url.scheme == 'tezos-storage':
            # '<key>' points at this contract's storage, '<address>/<key>'
            # at another contract's storage
            parts = parsed_url.path.split('/')
            if len(parts) == 1:
                storage = self.storage
            elif len(parts) == 2:
                context = self._spawn_context(address=parsed_url.netloc)
                storage = ContractInterface.from_context(context).storage
            else:
                raise NotImplementedError('Unknown metadata URL scheme')
            token_metadata_json = json.loads(storage['metadata'][parts[-1]]().decode())
            token_metadata = ContractTokenMetadata.from_json(token_metadata_json, self.context)
        elif parsed_url.scheme == 'sha256':
            raise NotImplementedError
        else:
            raise NotImplementedError('Unknown metadata URL scheme')
        return token_metadata
def _get_token_metadata_from_view(self, token_id: int) -> Optional[ContractTokenMetadata]:
self._logger.info('Trying to fetch token %s metadata from off-chain view', token_id)
try:
token_metadata_json = self.metadata.tokenMetadata(token_id).storage_view()[1]
return ContractTokenMetadata.from_json(token_metadata_json)
except KeyError:
self._logger.info('There\'s no off-chain view named `token_metadata`')
return None
except MichelsonRuntimeError:
self._logger.info('Off-chain view has no token metadata for token_id %s', token_id)
return None
@cached_property
def metadata_url(self) -> Optional[str]:
try:
return self.storage['metadata']['']().decode()
# FIXME: Dirty
except (KeyError, AssertionError):
return None
    @property
    def parameter(self) -> ContractEntrypoint:
        """The root entrypoint of the contract (internal invariant: it is
        always present in self.entrypoints once __init__ has run)."""
        root_name = self.program.parameter.root_name
        assert root_name in self.entrypoints, 'root entrypoint is undefined'
        return getattr(self, root_name)
    @property  # type: ignore
    @deprecated(deprecated_in='3.0.0', removed_in='3.1.0', details='access `ContractInterface` directly')
    def contract(self) -> 'ContractInterface':
        # legacy alias: the interface used to be wrapped, now it is self-contained
        return self
    @property  # type: ignore
    @deprecated(deprecated_in='3.0.0', removed_in='3.1.0', details='use `to_michelson()` instead')
    def text(self) -> str:
        # legacy alias for the formatted Michelson listing
        return self.to_michelson()
    @property  # type: ignore
    @deprecated(deprecated_in='3.0.0', removed_in='3.1.0', details='use `to_micheline()` instead')
    def code(self):
        # legacy alias for the Micheline JSON representation
        return self.to_micheline()
    @property  # type: ignore
    @deprecated(deprecated_in='3.0.0', removed_in='3.1.0', details='use `default()` instead')
    def call(self) -> ContractEntrypoint:
        # legacy alias for the root entrypoint
        return self.parameter
def operation_result(self, operation_group: | |
# gh_stars: 100-1000
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Miscellaneous logical units that don't fit into any category."""
import logging
import time
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, QueryBase
from ganeti.cmdlib.common import GetWantedNodes, SupportsOob
class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  Runs an out-of-band command (power on/off/cycle, health, power-status)
  on a set of nodes through the master node's OOB helper program.
  """
  REQ_BGL = False
  # power-off/power-cycle would take down the cluster master itself,
  # so these commands skip the master unless it supports OOB and the
  # user operates on it explicitly (see CheckPrereq)
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
  def ExpandNames(self):
    """Gather locks we need.
    """
    if self.op.node_names:
      (self.op.node_uuids, self.op.node_names) = \
        GetWantedNodes(self, self.op.node_names)
      lock_node_uuids = self.op.node_uuids
    else:
      # no explicit node list: lock every node in the cluster
      lock_node_uuids = locking.ALL_SET
    self.needed_locks = {
      locking.LEVEL_NODE: lock_node_uuids,
      }
  def CheckPrereq(self):
    """Check prerequisites.
    This checks:
    - the node exists in the configuration
    - OOB is supported
    Any errors are signaled by raising errors.OpPrereqError.
    """
    self.nodes = []
    self.master_node_uuid = self.cfg.GetMasterNode()
    master_node_obj = self.cfg.GetNodeInfo(self.master_node_uuid)
    assert self.op.power_delay >= 0.0
    if self.op.node_uuids:
      # explicit node list: refuse dangerous commands on the master,
      # with a hint on how to force them if the master supports OOB
      if (self.op.command in self._SKIP_MASTER and
          master_node_obj.uuid in self.op.node_uuids):
        master_oob_handler = SupportsOob(self.cfg, master_node_obj)
        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      master_node_obj.name)
        else:
          additional_text = "it does not support out-of-band operations"
        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (master_node_obj.name, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      # no explicit node list: target all nodes, silently dropping the
      # master for dangerous commands
      self.op.node_uuids = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_uuids.remove(master_node_obj.uuid)
    if self.op.command in self._SKIP_MASTER:
      assert master_node_obj.uuid not in self.op.node_uuids
    for node_uuid in self.op.node_uuids:
      node = self.cfg.GetNodeInfo(node_uuid)
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_uuid,
                                   errors.ECODE_NOENT)
      self.nodes.append(node)
      # powering off a node still marked online would break the cluster's
      # view of it; require it to be marked offline first
      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node.name,
                                   errors.ECODE_STATE)
  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    :returns: per-node list of (status, data) result tuples
    """
    ret = []
    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)
      oob_program = SupportsOob(self.cfg, node)
      if not oob_program:
        # node has no OOB support: record 'unavailable' and move on
        node_entry.append((constants.RS_UNAVAIL, None))
        continue
      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(self.master_node_uuid, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)
      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
        continue
      try:
        self._CheckPayload(result)
      except errors.OpExecError as err:
        self.LogWarning("Payload returned by node '%s' is not valid: %s",
                        node.name, err)
        node_entry.append((constants.RS_NODATA, None))
      else:
        if self.op.command == constants.OOB_HEALTH:
          # For health we should log important events
          for item, status in result.payload:
            if status in [constants.OOB_STATUS_WARNING,
                          constants.OOB_STATUS_CRITICAL]:
              self.LogWarning("Item '%s' on node '%s' has status '%s'",
                              item, node.name, status)
        if self.op.command == constants.OOB_POWER_ON:
          node.powered = True
        elif self.op.command == constants.OOB_POWER_OFF:
          node.powered = False
        elif self.op.command == constants.OOB_POWER_STATUS:
          powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
          if powered != node.powered:
            logging.warning(("Recorded power state (%s) of node '%s' does not"
                             " match actual power state (%s)"), node.powered,
                            node.name, powered)
        # For configuration changing commands we should update the node
        if self.op.command in (constants.OOB_POWER_ON,
                               constants.OOB_POWER_OFF):
          self.cfg.Update(node, feedback_fn)
        node_entry.append((constants.RS_NORMAL, result.payload))
        # stagger power-on to avoid inrush on shared power infrastructure
        if (self.op.command == constants.OOB_POWER_ON and
            idx < len(self.nodes) - 1):
          time.sleep(self.op.power_delay)
    return ret
  def _CheckPayload(self, result):
    """Checks if the payload is valid.
    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid
    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))
    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))
    if self.op.command in [
      constants.OOB_POWER_ON,
      constants.OOB_POWER_OFF,
      constants.OOB_POWER_CYCLE,
      ]:
      # these commands are fire-and-forget: any payload is unexpected
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))
    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))
class ExtStorageQuery(QueryBase):
  """Query implementation for ExtStorage providers."""
  FIELDS = query.EXTSTORAGE_FIELDS
  def ExpandNames(self, lu):
    """Compute the wanted names and (currently disabled) locks."""
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = [lu.cfg.GetNodeInfoByName(name).uuid for name in self.names]
    else:
      self.wanted = locking.ALL_SET
    self.do_locking = self.use_locking
  def DeclareLocks(self, lu, level):
    """No per-level locks are needed (locking is disabled above)."""
    pass
  @staticmethod
  def _DiagnoseByProvider(rlist):
    """Remaps a per-node return list into a per-provider per-node dictionary
    @param rlist: a map with node uuids as keys and ExtStorage objects as values
    @rtype: dict
    @return: a dictionary with extstorage providers as keys and as
        value another map, with node uuids as keys and tuples of
        (path, status, diagnose, parameters) as values, eg::
          {"provider1": {"node_uuid1": [(/usr/lib/..., True, "", [])]
                         "node_uuid2": [(/srv/..., False, "missing file")]
                         "node_uuid3": [(/srv/..., True, "", [])]
          }
    """
    all_es = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_uuid for node_uuid in rlist
                  if not rlist[node_uuid].fail_msg]
    for node_uuid, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, params) in nr.payload:
        if name not in all_es:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_es[name] = {}
          for nuuid in good_nodes:
            all_es[name][nuuid] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_es[name][node_uuid].append((path, status, diagnose, params))
    return all_es
  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    Diagnoses every provider on all online, vm-capable nodes, derives a
    per-nodegroup validity flag and the intersection of provider
    parameters across nodes.
    """
    valid_nodes = [node.uuid
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
    data = {}
    nodegroup_list = lu.cfg.GetNodeGroupList()
    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #      { nodegroup1: status
      #        nodegroup2: status
      #      }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)
        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []
        for node in nodegroup_nodes:
          if node in valid_nodes:
            # NOTE(review): assumes every valid node has an entry in
            # es_data; nodes whose RPC failed are absent from es_data
            # and would raise KeyError here -- confirm upstream.
            if es_data[node] != []:
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)
        # A nodegroup is valid only if no member node reported failure.
        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True
      # Compute the provider's parameters
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          # One invalid node invalidates the parameter set entirely.
          break
        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)
      params = list(parameters)
      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)
      data[es_name] = info
    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, list(pol), None)
            if name in data]
class LUExtStorageDiagnose(NoHooksLU):
  """Logical unit for ExtStorage diagnose/query.

  Thin wrapper delegating all work to L{ExtStorageQuery}.
  """
  REQ_BGL = False
  def CheckArguments(self):
    # Build the query object from the requested names/fields (no locking).
    self.eq = ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
                              self.op.output_fields, False)
  def ExpandNames(self):
    """Delegate name expansion to the query object."""
    self.eq.ExpandNames(self)
  def Exec(self, feedback_fn):
    """Run the query and return old-style results."""
    return self.eq.OldStyleQuery(self)
class LURestrictedCommand(NoHooksLU):
"""Logical unit for executing restricted commands.
"""
REQ_BGL = False
def ExpandNames(self):
if self.op.nodes:
(self.op.node_uuids, self.op.nodes) = | |
<reponame>jhurley13/automating-cbc
# taxonomy
# from taxonomy import Taxonomy
import sys
import traceback
from pathlib import Path
from typing import Tuple, Optional, Any, List
from IPython.display import display
# https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
from pandas.api.types import CategoricalDtype
import numpy as np
import pandas as pd
from singleton_decorator import singleton
import numbers
from ebird_extras import EBirdExtra
from taxonomy_clements import TaxonomyClements
from taxonomy_ioc import TaxonomyIOC
from taxonomy_nacc import TaxonomyNACC
from taxonomy_aba import TaxonomyABA
# Base Path
"""
https://ebird.org/science/the-ebird-taxonomy
Spuh: Genus or identification at broad level, e.g., swan sp. Cygnus sp.
Slash: Identification to Species-pair, e.g., Tundra/Trumpeter Swan Cygnus
columbianus/buccinator
Species: e.g., Tundra Swan Cygnus columbianus
ISSF or Identifiable Sub-specific Group: Identifiable subspecies or group of
subspecies, e.g., Tundra Swan (Bewick’s) Cygnus columbianus bewickii or Tundra
Swan (Whistling) Cygnus columbianus columbianus
Hybrid: Hybrid between two species, e.g., Tundra x Trumpeter Swan (hybrid)
Intergrade: Hybrid between two ISSF (subspecies or subspecies groups), e.g.,
Tundra Swan (Whistling x Bewick’s) Cygnus columbianus columbianus x bewickii
Domestic: Distinctly-plumaged domesticated varieties that may be free-flying
(these do not count on personal lists) e.g., Mallard (Domestic type)
Form: Miscellaneous other taxa, including recently-described species yet to be
accepted or distinctive forms that are not universally accepted, e.g.,
Red-tailed Hawk (abieticola), Upland Goose (Bar-breasted)
https://www.birds.cornell.edu/clementschecklist/
Note:
tt[(tt.TAXON_ORDER != tt.Clem_Seq) & (tt.Clem_Seq != '')] is empty, i.e. for records
with Clem_Seq, it matches TAXON_ORDER
"""
"""
Notes on reference sources
The base used for the taxonomy is the eBird/Clements taxonomy, for three main reasons.
- It will match up with species reported through eBird
- It has the taxon_order field for sorting
- It contains hybrids and SPUH entries
--------------------------------------------------------------------------------
Suggested citation for the current version of the Clements Checklist, including the August 2019
Updates and Corrections:
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>. 2019. The eBird/Clements Checklist of Birds of the World: v2019. Downloaded from
https://www.birds.cornell.edu/clementschecklist/download/
https://www.birds.cornell.edu/clementschecklist/download/
Three checklists are available. The first is the 2019 edition of the Clements Checklist (Clements
Checklist v2019); the second is the 2019 edition of the eBird taxonomy (eBird v2019); and the third
is the “master” or integrated checklist, which includes all entries in both the Clements Checklist
and the eBird taxonomy.
clements_base = 'https://www.birds.cornell.edu/clementschecklist/wp-content/uploads/2019/08'
Clements Checklist v2019:
{clements_base}/Clements-Checklist-v2019-August-2019.xlsx
eBird Taxonomy v2019:
{clements_base}/eBird_Taxonomy_v2019.xlsx
eBird/Clements Checklist v2019:
{clements_base}/eBird-Clements-v2019-integrated-checklist-August-2019.xlsx
--------------------------------------------------------------------------------
https://www.worldbirdnames.org/new/
<NAME>, <NAME> & <NAME> (Eds). 2020. IOC World Bird List (v10.2).
doi : 10.14344/IOC.ML.10.2.
https://www.worldbirdnames.org/new/ioc-lists/master-list-2/
Comparison of IOC 10.2 with Clements 2019 (<NAME>)
http://www.worldbirdnames.org/IOC%20v10-2%20v%20Clements%202019.xlsx
This is the one we use for IOC as it has both Clements and IOC sequence numbers. This is
also the only one on this site with "tidy" data suitable for data science
Master List
http://www.worldbirdnames.org/master_ioc_list_v10.2.xlsx
Cross reference between IOC 10.2 and Clements v2019, HBW/BL(vol1, vol2), H&M4.1, HBW, Peters,
TiF 3.10, HBW/BirdLife v4 (2019), S&M '93, IOC10.1. Simplified version.
http://www.worldbirdnames.org/IOC_10.2_vs_other_lists.xlsx
http://www.worldbirdnames.org/IOC_Names_File_Plus-10.2_full_ssp.xlsx
--------------------------------------------------------------------------------
OTHERS
The Howard & Moore Complete Checklist of the Birds of the World, 4th Edition
The Trust for Avian Systematics
https://www.aviansystematics.org/index
Not used; not available in electronic form
Zoological Nomenclature Resource
http://www.zoonomen.net
<NAME>, M.D.
--------------------------------------------------------------------------------
"""
MISSING_TAXON_ORDER = 0 # or 99999, depends where we want those to sort
@singleton
class Taxonomy(object):
"""Combined Taxonomy
Attributes:
"""
    def __init__(self, cache_path: Path = None, ebird_extra: EBirdExtra = None):
        """Build (or load from cache) the combined taxonomy.

        :param cache_path: directory holding the taxonomy CSV caches
        :param ebird_extra: helper used to fetch the eBird taxonomy when
            no cache file exists yet
        """
        self._cache_path = cache_path
        self._ebird_extra = ebird_extra
        # Reference data ships alongside this module.
        taxonomy_base_path = Path(__file__).parent.absolute()
        self.taxonomy_reference_path = taxonomy_base_path / 'reference'
        # Fill these lazily
        self.taxonomy = None
        self._taxonomy_clements = None  # TaxonomyClements().get_taxonomy()
        self._taxonomy_ioc = None  # TaxonomyIOC().get_taxonomy()
        self._taxonomy_nacc = None  # TaxonomyNACC().get_taxonomy()
        self._taxonomy_aba = None
        self._taxonomy_ebird = None
        self.INVALID_NACC_SORT_ORDER = 999999.1  # set again from NACC
        # Eagerly resolve the merged taxonomy (from cache when possible).
        self.taxonomy = self.get_taxonomy_cached()
    def fix_up_merged_taxonomy(self):
        """Normalize the merged taxonomy DataFrame in place.

        Fills NA values, restores the ordered 'Category' categorical
        (lost when the cache round-trips through CSV) and coerces every
        column to its expected dtype.
        """
        self.taxonomy['taxonOrder'] = self.taxonomy['taxonOrder'].fillna(MISSING_TAXON_ORDER)
        self.taxonomy['extinct'] = self.taxonomy['extinct'].fillna(False)
        self.taxonomy['extinctYear'] = self.taxonomy['extinctYear'].replace(0.0, '')
        # self.INVALID_NACC_SORT_ORDER = self._taxonomy_nacc.INVALID_NACC_SORT_ORDER
        # Renames
        try:
            self.taxonomy.rename(columns={'category': 'Category'}, inplace=True)
        except AttributeError:
            pass
        # species should be first, spuh last, the others don't matter
        ordered_categories = ['species', 'issf', 'slash', 'hybrid', 'form',
                              'intergrade', 'domestic', 'spuh']
        cat_type = CategoricalDtype(categories=ordered_categories, ordered=True)
        # Writing to CSV will strip categorical information, so need to add after reading cache
        self.taxonomy.Category = self.taxonomy.Category.astype(cat_type)
        # self.taxonomy.NACC_SORT_ORDER.fillna(0, inplace=True)
        # Target dtype for every known taxonomy column.
        xdtypes = {
            'sciName': str, 'comName': str, 'speciesCode': str, 'Category': str,
            'taxonOrder': int,
            'bandingCodes': str, 'comNameCodes': str, 'sciNameCodes': str, 'order': str,
            'familyComName': str, 'familySciName': str, 'reportAs': str, 'extinct': bool,
            'extinctYear': str,
            'comNameLower': str, 'sciNameLower': str, 'TAXON_ORDER': int, 'CATEGORY': str,
            'SPECIES_CODE': str, 'PRIMARY_COM_NAME': str, 'SCI_NAME': str, 'ORDER1': str,
            'FAMILY': str,
            'SPECIES_GROUP': str, 'REPORT_AS': str, 'ioc_seq': int,
            'ioc_scientific_name': str,
            'ioc_common_name': str, 'ioc_clements_seq': int,
            'ioc_clements_scientific_name': str,
            'ioc_clements_common_name': str, 'ioc_range': str, 'NACC_SORT_ORDER': float,
            'ABA_SORT_ORDER': float, 'nacc_id': str,
            'nacc_avibase_id': str, 'nacc_rank': str, 'nacc_common_name': str, 'nacc_order': str,
            'nacc_family': str, 'nacc_subfamily': str, 'nacc_genus': str, 'nacc_species': str,
            'nacc_common_name_lower': str
        }
        # Empty strings left by merge-time fillna('') must become numeric
        # zeros before the astype() below can succeed.
        self.taxonomy.ioc_seq = self.taxonomy.ioc_seq.replace('', 0)
        self.taxonomy.ioc_clements_seq = self.taxonomy.ioc_clements_seq.replace('', 0)
        self.taxonomy.NACC_SORT_ORDER = self.taxonomy.NACC_SORT_ORDER.replace('', 0.0)
        self.taxonomy.ABA_SORT_ORDER = self.taxonomy.ABA_SORT_ORDER.replace('', 0.0)
        self.taxonomy = self.taxonomy.astype(dtype=xdtypes)
        # Fix up any remaining NA values
        colnames_numerics_only = self.taxonomy.select_dtypes(include=np.number).columns.tolist()
        if 'Category' in colnames_numerics_only:
            colnames_numerics_only.remove('Category')
        almost_all_cols = list(self.taxonomy.columns)
        almost_all_cols.remove('Category')
        # Numeric columns get 0, everything else ''; 'Category' is skipped,
        # presumably because '' is not a valid category -- TODO confirm.
        fill_values = {col: 0 if col in colnames_numerics_only else ''
                       for col in almost_all_cols}
        self.taxonomy.fillna(fill_values, inplace=True)
        #
        # for col in colnames_numerics_only:
        #     self.taxonomy[col] = self.taxonomy[col].astype(int)
        # for col in self.taxonomy.columns:
        #     newtype = xdtypes.get(col, str)
        #     self.taxonomy[col] = self.taxonomy[col].astype(newtype)
    def get_taxonomy_cached(self) -> pd.DataFrame:
        """Return the full merged taxonomy, building and caching it on miss.

        On a cache hit the CSV is loaded and re-normalized; otherwise the
        eBird API taxonomy is fetched and the Clements, IOC, NACC and ABA
        checklists are merged in, then the result is written to the cache.
        Errors are printed and swallowed (best-effort); the current value
        of ``self.taxonomy`` is returned regardless.
        """
        cached_taxonomy_path = self._cache_path / 'taxonomy_full.csv'
        try:
            if cached_taxonomy_path.is_file():
                self.taxonomy = pd.read_csv(cached_taxonomy_path,
                                            index_col=False, low_memory=False)
                # CSV round-trip loses dtypes/categoricals; restore them.
                self.fix_up_merged_taxonomy()
            else:
                print(f'Creating full taxonomy cache...')
                # EBird API taxonomy is the base
                self._taxonomy_ebird = self.get_taxonomy_api_cached()
                self.taxonomy = self._taxonomy_ebird.copy()
                # print(f'ebird: {self.taxonomy.shape}')
                self._taxonomy_clements = TaxonomyClements().get_taxonomy()
                self._taxonomy_ioc = TaxonomyIOC().get_taxonomy()
                self._taxonomy_nacc = TaxonomyNACC().get_taxonomy()
                self._taxonomy_aba = TaxonomyABA().get_taxonomy()
                # Now merge in Clements, IOC and NACC checklists
                self.taxonomy = self.merge_clements_into_taxonomy()
                # print(f'clements: {self.taxonomy.shape}')
                self.taxonomy = self.merge_ioc_into_taxonomy()
                # print(f'ioc: {self.taxonomy.shape}')
                self.taxonomy = self.merge_nacc_into_taxonomy()
                # print(f'nacc: {self.taxonomy.shape}')
                self.taxonomy = self.merge_aba_into_taxonomy()
                self.fix_up_merged_taxonomy()
                # print(f'fixu: {self.taxonomy.shape}')
                print('Adding synthesized NACC sort orders')
                self.add_synthesized_sort_orders('NACC_SORT_ORDER')
                print('Adding synthesized ABA sort orders')
                self.add_synthesized_sort_orders('ABA_SORT_ORDER')
                self.taxonomy.to_csv(cached_taxonomy_path, index=False)
                print(f'Written to cache: {self.taxonomy.shape[0]} records')
        except Exception as ee:
            # Best-effort: report and fall through with whatever we have.
            print(ee)
            traceback.print_exc(file=sys.stdout)
        # Fill in code4 column
        # self.fill_code4s()
        # print(f'exit: {self.taxonomy.shape}')
        return self.taxonomy
def fill_code4s(self):
code4s = []
for ix, row in self.taxonomy.iterrows():
if row.Category != 'species':
code4s.append(None)
elif len(row.banding_codes) == 1:
code4s.append(list(row.banding_codes)[0])
elif len(row.comname_codes) > 0:
code4s.append(list(row.comname_codes)[0])
else:
code4s.append(None)
self.taxonomy['code4'] = code4s
    def get_taxonomy_api_cached(self) -> pd.DataFrame:
        """Return the raw eBird API taxonomy, fetching and caching on miss.

        Adds lowercase helper columns (comNameLower/sciNameLower) used by
        the exact-match lookups.  Errors are printed and swallowed; an
        empty DataFrame is returned in that case.
        """
        taxonomy_df = pd.DataFrame()
        cached_taxonomy_path = self._cache_path / 'taxonomy_ebird_api.csv'
        try:
            if cached_taxonomy_path.is_file():
                taxonomy_df = pd.read_csv(cached_taxonomy_path, index_col=False)
            else:
                print(f'Creating eBird taxonomy cache...')
                taxonomy_df = self._ebird_extra.get_taxonomy_from_ebird()
                taxonomy_df['comNameLower'] = taxonomy_df.comName.apply(lambda x: x.lower())
                taxonomy_df['sciNameLower'] = taxonomy_df.sciName.apply(lambda x: x.lower())
                taxonomy_df.to_csv(cached_taxonomy_path, index=False)
        except Exception as ee:
            print(ee)
            traceback.print_exc(file=sys.stdout)
        return taxonomy_df
def find_local_name(self, local_name) -> \
Tuple[Optional[Any], Optional[Any], Optional[Any], Optional[Any]]:
record = self.find_local_name_row(local_name)
if not record:
return None, None, None, None
return record.comName, record.TAXON_ORDER, record.SPECIES_GROUP, record.NACC_SORT_ORDER
def find_local_name_row(self, common_name) -> Optional[pd.Series]:
# Look for exact matches
if not common_name:
return None
record = None
try:
common_name_lower = common_name.lower()
mask = self.taxonomy.comNameLower == common_name_lower
records = self.taxonomy[mask]
record = records.iloc[0]
except IndexError:
pass
return record
def find_scientific_name_row(self, scientific_name) -> Optional[pd.Series]:
# Look for exact matches
if not scientific_name:
return None
record = None
try:
scientific_name_lower = scientific_name.lower()
mask = self.taxonomy.sciNameLower == scientific_name_lower
records = self.taxonomy[mask]
record = records.iloc[0]
except IndexError:
pass
return record
# @property
# def local_to_ebird_translations(self):
# return self._local_to_ebird_translations
def species6_to_common_name(self, species6):
commonname = species6
try:
commonname = self.taxonomy[self.taxonomy.speciesCode == species6.lower()].iloc[
0].comName
except Exception as ee:
print(f'{species6} not found: {ee}')
traceback.print_exc(file=sys.stdout)
return commonname
# def species6_to_common_name_aou(self, species6):
# commonname = species6
# try:
# species6u = species6.upper()
# commonname = aou_codes[aou_codes.SPEC6 == species6.upper()][0].COMMONNAME
# except Exception as ee:
# print(f'{species6} not found: {ee}')
#
# return commonname
def find_species6_ebird(self, common_name):
try:
# common_name_u = common_name.upper()
# commonname = ebird_taxonomy[ebird_taxonomy.SPECIES_CODE ==
# species6.lower()].iloc[0].COMMON_NAME
# ebird-api uses speciesCode
species6 = self.taxonomy[self.taxonomy.comName == common_name].iloc[0].speciesCode
except Exception as ee:
# print(f'{common_name} not found: {ee} [find_species6_ebird]')
species6 = None
return species6
def merge_clements_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_clements,
left_on='comName',
right_on='PRIMARY_COM_NAME', how='left').fillna('')
return self.taxonomy
def merge_ioc_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_ioc, left_on='comName',
right_on='ioc_clements_common_name',
how='left').fillna('')
return self.taxonomy
def merge_nacc_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_nacc, left_on='comName',
right_on='nacc_common_name', how='left').fillna('')
return self.taxonomy
def merge_aba_into_taxonomy(self) -> pd.DataFrame:
self.taxonomy = self.taxonomy.merge(self._taxonomy_aba, left_on='comName',
right_on='aba_common_name', how='left').fillna('')
return self.taxonomy
    def get_nacc_taxonomy(self) -> pd.DataFrame:
        """Return the raw NACC checklist DataFrame (None until the full
        taxonomy has been built from scratch rather than read from cache)."""
        return self._taxonomy_nacc
# -------------------------------- NACC Ordering --------------------------------------------
@staticmethod
def identify_family_sort_orders(family: pd.DataFrame, sort_col: str) -> list:
# family e.g. 'Grebes'
need_order = family[family.Category != 'species']
family_species = family[family.Category == 'species'] # .reset_index(drop=True)
sort_orders = []
base_sort_order = 0
for ix, row in need_order.iterrows():
try:
base_sort_order = 0
if row.Category == 'spuh':
base_sort_order = max(family[sort_col])
else:
bc_mask = [len(row.comname_codes & bc) > 0 for bc in
family_species.banding_codes]
if any(bc_mask):
mask = bc_mask
else:
cn_mask = [len(row.comname_codes | |
<reponame>marza-animation-planet/das<gh_stars>1-10
import das
import re
import fnmatch
import math
NoUI = False
try:
import Qt # pylint: disable=import-error
from Qt import QtCore # pylint: disable=import-error
from Qt import QtGui # pylint: disable=import-error
from Qt import QtWidgets # pylint: disable=import-error
from Qt import QtCompat # pylint: disable=import-error
except Exception, e:
print("Failed to import Qt (%s)" % e)
NoUI = True
if not NoUI:
def IsPySide2():
if hasattr(Qt, "__version_info__"):
return Qt.__version_info__[0] >= 2
if hasattr(Qt, "IsPySide2"):
return Qt.IsPySide2
return False
    class FieldFilter(object):
        """Abstract base class for filters selecting schema field names.

        Subclasses must implement copy() and matches().
        """
        def __init__(self, name):
            super(FieldFilter, self).__init__()
            # Identifier used to look the filter up inside a FilterSet.
            self.name = name
        def copy(self):
            """Return an independent clone of this filter (abstract)."""
            raise Exception("Not implemented")
        def matches(self, fullname):
            """Return True when *fullname* is selected (abstract)."""
            raise Exception("Not implemented")
class GlobFilter(FieldFilter):
def __init__(self, name, pattern, invert=False):
super(GlobFilter, self).__init__(name)
self.pattern = pattern
self.invert = invert
def copy(self):
return GlobFilter(self.name, self.pattern, self.invert)
def matches(self, fullname):
rv = fnmatch.fnmatch(fullname, self.pattern)
return ((not rv) if self.invert else rv)
class RegexFilter(FieldFilter):
def __init__(self, name, pattern, partial=False, invert=False):
super(RegexFilter, self).__init__(name)
self.regex = re.compile(pattern)
self.partial = partial
self.invert = invert
def copy(self):
return RegexFilter(self.name, self.regex.pattern, self.partial, self.invert)
def matches(self, fullname):
if self.partial:
rv = (self.regex.search(fullname) is not None)
else:
rv = (self.regex.match(fullname) is not None)
return ((not rv) if self.invert else rv)
class ListFilter(FieldFilter):
def __init__(self, name, values, partial=False, invert=False):
super(ListFilter, self).__init__(name)
self.values = set(map(str, values))
self.partial = partial
self.invert = invert
def copy(self):
return ListFilter(self.name, self.values, self.partial, self.invert)
def matches(self, fullname):
if self.partial:
rv = True
for item in self.values:
if item in fullname:
rv = False
break
else:
rv = (fullname in self.values)
return ((not rv) if self.invert else rv)
class FilterSet(FieldFilter):
All = 0
Any = 1
def __init__(self, name, mode, invert=False):
super(FilterSet, self).__init__(name)
self.filters = []
if mode != self.All and mode != self.Any:
raise Exception("[das] Invalid FilterSet mode: %d" % mode)
self.mode = mode
self.invert = invert
def copy(self):
rv = FilterSet(self.name, self.mode, self.invert)
rv.filters = map(lambda x: x.copy(), self.filters)
return rv
def matches(self, fullname):
if self.mode == self.All:
rv = True
for f in self.filters:
if not f.matches(fullname):
rv = False
break
else:
rv = False
for f in self.filters:
if f.matches(fullname):
rv = True
break
return ((not rv) if self.invert else rv)
def add(self, flt, force=False):
for i in xrange(len(self.filters)):
if flt.name == self.filters[i].name:
if force:
self.filters[i] = flt.copy()
return True
else:
return False
self.filters.append(flt.copy())
return True
def count(self):
return len(self.filters)
def at(self, idx):
return self.filters[idx]
def get(self, name):
for f in self.filters:
if f.name == name:
return f
return None
def clear(self):
self.filters = []
def remove(self, idxOrName):
if isinstance(idxOrName, basestring):
idx = -1
for i in xrange(len(self.filters)):
if idxOrName == self.filters[i].name:
idx = i
break
if idx != -1:
del(self.filters[idx])
return True
else:
return False
else:
try:
del(self.filters[idxOrName])
return True
except:
return False
class ModelItem(object):
ReservedTypeNames = set(["alias", "empty", "boolean", "integer", "real", "string", "list", "tuple", "set", "dict", "struct"])
        def __init__(self, name, row=0, key=None, parent=None):
            """Create a tree item.

            name: display name of the item
            row: row index within the parent's children
            key: key inside the parent's mapping data, when applicable
            parent: parent ModelItem, or None for the root
            """
            super(ModelItem, self).__init__()
            self.row = row
            self.key = key
            self.name = name
            self.parent = parent
            # All other attributes are (re)set by reset_members().
            self.reset_members()
        def __str__(self):
            """Debug representation listing the item's main flags."""
            return "ModelItem(%s, compound=%s, resizable=%s, multi=%s, editable=%s, optional=%s, parent=%s, children=%d)" % (self.name, self.compound, self.resizable, self.multi, self.editable, self.optional, ("None" if not self.parent else self.parent.name), len(self.children))
        def reset_members(self):
            """Reset every derived attribute to its default; update() then
            fills them in from the schema type and current data."""
            self.children = []
            self.compound = False
            self.mapping = False
            self.mappingkeys = None
            self.mappingkeytype = None
            self.uniformmapping = True  # mapping of uniform type values
            self.resizable = False
            self.orderable = False  # orderable compound
            self.optional = False
            self.deprecated = False
            self.editable = False  # will be false for aliases
            self.editableType = True  # is value tagged as editable in the schema type
            self.editableValue = True  # is value actually editable in the UI
            self.multi = False
            self.data = None  # for alias, it is the same data as the original
            self.type = None
            self.baseType = None
            self.typestr = ""
            self.desc = ""
def fullname(self, skipRoot=False):
k = ""
item = self
while item:
if skipRoot and not item.parent:
break
suffix = ("" if not k else (".%s" % k))
k = item.name + suffix
item = item.parent
return k
        def get_description(self, typ):
            """Return the first non-empty description found while unwrapping
            *typ* through schema indirections (SchemaType references,
            single-alternative Or, Optional); "" when none is found."""
            desc = typ.description
            while not desc:
                if isinstance(typ, das.schematypes.SchemaType):
                    # Follow the named reference to the actual type.
                    typ = das.get_schema_type(typ.name)
                elif isinstance(typ, das.schematypes.Or):
                    if len(typ.types) == 1:
                        typ = typ.types[0]
                    else:
                        # Ambiguous union: stop unwrapping.
                        break
                elif isinstance(typ, das.schematypes.Optional):
                    typ = typ.type
                else:
                    break
                desc = typ.description
            return desc
        def real_type(self, typ):
            """Unwrap schema indirections (SchemaType references,
            single-alternative Or, Optional) and return the underlying
            concrete schema type."""
            while True:
                if isinstance(typ, das.schematypes.SchemaType):
                    typ = das.get_schema_type(typ.name)
                elif isinstance(typ, das.schematypes.Or):
                    if len(typ.types) == 1:
                        typ = typ.types[0]
                    else:
                        # A real union: cannot be unwrapped further.
                        break
                elif isinstance(typ, das.schematypes.Optional):
                    typ = typ.type
                else:
                    break
            return typ
def is_compound(self, t):
if isinstance(self.real_type(t), (das.schematypes.Tuple,
das.schematypes.Sequence,
das.schematypes.Set,
das.schematypes.Struct,
das.schematypes.StaticDict,
das.schematypes.Dict,
das.schematypes.DynamicDict)):
return True
else:
return False
        def is_editable(self, t):
            """Whether values of schema type *t* can be edited inline in the
            UI: compounds and aliases cannot; unions only when every
            alternative is itself editable; Class types only when they
            support string round-tripping."""
            rt = self.real_type(t)
            if self.is_compound(rt):
                return False
            else:
                if isinstance(rt, das.schematypes.Or):
                    for ot in rt.types:
                        # Don't allow more that one depth of Or
                        if isinstance(self.real_type(ot), das.schematypes.Or):
                            return False
                        # All possible types for a Or must be editable
                        if not self.is_editable(ot):
                            return False
                elif isinstance(rt, das.schematypes.Class):
                    # Editable only when the class can convert to/from strings.
                    return (hasattr(rt.klass, "string_to_value") and hasattr(rt.klass, "value_to_string"))
                elif das.schematypes.Alias.Check(rt):
                    return False
            return True
def class_name(self, klass):
cn = self.data.__class__.__name__
mn = self.data.__class__.__module__
if mn not in ("__builtin__", "__main__"):
cn = mn + "." + cn
if cn in self.ReservedTypeNames:
cn += " (user)"
# convert das.types.SomeType to someType
if cn.startswith("das.types."):
cn = cn[10].lower() + cn[11:]
return cn
        def update_multi_type_string(self):
            """For a union-typed (multi) item, refresh ``typestr`` from the
            runtime type of the current data.

            NOTE(review): uses the Python 2 ``long``/``basestring``
            builtins; this module targets Python 2.
            """
            if self.multi:
                if self.data is None:
                    self.typestr = "empty"
                elif isinstance(self.data, bool):
                    self.typestr = "boolean"
                elif isinstance(self.data, (int, long)):
                    self.typestr = "integer"
                elif isinstance(self.data, float):
                    self.typestr = "real"
                elif isinstance(self.data, basestring):
                    self.typestr = "string"
                else:
                    # Fall back to the (possibly module-qualified) class name.
                    self.typestr = self.class_name(self.data.__class__)
def multi_string(self):
if self.data is None or isinstance(self.data, bool):
return str(self.data).lower()
else:
return str(self.data)
        def get_valid_types(self, **kwargs):
            """For a union-typed (multi) item, list the (typestr, value)
            pairs that the given string can be interpreted as.

            kwargs:
              string: text to interpret (defaults to multi_string())

            Returns [] for non-multi items.  NOTE(review): uses the
            Python 2 ``long`` builtin; this module targets Python 2.
            """
            values = []
            if not self.multi:
                return values
            s = kwargs.get("string", self.multi_string())
            for t in self.type.types:
                if isinstance(t, das.schematypes.SchemaType):
                    # Resolve named type references before testing.
                    t = das.get_schema_type(t.name)
                if isinstance(t, das.schematypes.Empty):
                    if s.lower() == "none":
                        values.append(("empty", None))
                elif isinstance(t, das.schematypes.Boolean):
                    if s.lower() in ("on", "yes", "true", "off", "no", "false"):
                        v = (s.lower() in ("on", "yes", "true"))
                        values.append(("boolean", v))
                elif isinstance(t, das.schematypes.Integer):
                    try:
                        v = long(s)
                        t._validate_self(v)
                        values.append(("integer", v))
                    except:
                        pass
                elif isinstance(t, das.schematypes.Real):
                    try:
                        v = float(s)
                        t._validate_self(v)
                        values.append(("real", v))
                    except:
                        pass
                elif isinstance(t, das.schematypes.String):
                    try:
                        t._validate_self(s)
                        values.append(("string", s))
                    except:
                        pass
                elif isinstance(t, das.schematypes.Class):
                    try:
                        # Round-trip through the class' string conversion.
                        v = t.make_default()
                        v.string_to_value(s)
                        t._validate_self(v)
                        values.append((self.class_name(t.klass), v))
                    except:
                        pass
            return values
def exists(self):
if self.typestr != "alias":
if self.parent and self.parent.mapping and self.parent.mappingkeytype is None:
return (True if not self.optional else (self.key in self.parent.data))
else:
return True
else:
return False
def update(self, data, type=None, hideDeprecated=True, hideAliases=True, showHidden=False, fieldFilters=None):
self.reset_members()
self.data = data
self.type = type
if self.type is None and self.data is not None:
self.type = self.data._get_schema_type()
if self.type is None:
raise Exception("No schema type for model item")
self.baseType = self.type
if das.schematypes.Alias.Check(self.type):
# Shortcut
self.typestr = "alias"
self.data = None
if fieldFilters and not fieldFilters.matches(self.fullname(skipRoot=True)):
return False
else:
return True
# initialize those two with original type
self.editableType = self.type.editable
self.desc = self.get_description(self.type)
self.optional = isinstance(self.type, das.schematypes.Optional)
self.deprecated = isinstance(self.type, das.schematypes.Deprecated)
self.type = self.real_type(self.type)
# # override description using used type
# if not self.desc:
# self.desc = self.type.description
self.multi = isinstance(self.type, das.schematypes.Or)
# if originally editable, check that type value can effectively be edited
self.editableValue = self.is_editable(self.type)
self.editable = (self.editableType and self.editableValue)
self.compound = self.is_compound(self.type)
if not self.compound:
if self.multi:
self.update_multi_type_string()
# Try to figure actual datatype. If it is a compound, built matching item tree
for typ in self.type.types:
if das.check(data, typ):
if isinstance(data, (das.types.Sequence, das.types.Tuple, das.types.Set, das.types.Struct, das.types.Dict)):
self.multi = False
self.compound = True
self.desc = self.get_description(typ)
self.type = self.real_type(typ)
self.editableType = self.type.editable
self.editableValue = self.is_editable(self.type)
self.editable = (self.editableType and self.editableValue)
# if not self.desc:
# self.desc = self.type.description
break
else:
if isinstance(self.type, das.schematypes.Boolean):
self.typestr = "boolean"
elif isinstance(self.type, das.schematypes.Integer):
self.typestr = "integer"
elif isinstance(self.type, das.schematypes.Real):
self.typestr = "real"
elif isinstance(self.type, das.schematypes.String):
self.typestr = "string"
elif isinstance(self.type, das.schematypes.Empty):
self.typestr = "empty"
elif isinstance(self.type, das.schematypes.Class):
self.typestr = self.class_name(self.type.klass)
elif das.schematypes.Alias.Check(self.type):
self.typestr = "alias"
if self.compound:
if isinstance(self.type, das.schematypes.Sequence):
self.typestr = "list"
self.resizable = True
self.orderable = True
if self.exists():
for i in xrange(len(self.data)):
itemname = "[%d]" % i
itemdata = self.data[i]
newitem = ModelItem(itemname, row=i, parent=self)
if | |
= next_ids[i] - 1
transitions[nid] = next_ids
return Network(nodes, transitions)
def delete_nodes(self, node_ids):
# Make sure no duplicates.
for i in range(len(node_ids)):
assert node_ids[i] not in node_ids[i+1:]
network = Network(self.nodes, self.transitions)
# Delete from high to low so the node_ids don't get messed up.
node_ids = reversed(sorted(node_ids))
for nid in node_ids:
network = network.delete_node(nid)
return network
def merge_nodes(self, node_ids, nodeid2parents=None):
    """node_ids is a list of the indexes of nodes.  Replace all
    these nodes with just a single one.  Returns a new Network
    object.

    nodeid2parents is an optional precomputed child->parents index;
    if omitted it is rebuilt from self.
    """
    if nodeid2parents is None:
        nodeid2parents = _make_parents_dict(self)
    node_ids = sorted(node_ids)
    # Make sure no duplicate node_ids.  (Sorted, so duplicates are
    # adjacent.)
    for i in range(len(node_ids) - 1):
        assert node_ids[i] != node_ids[i + 1], "Duplicate node IDs."
    # Make sure nodes are the same type.
    for i in range(1, len(node_ids)):
        n1 = self.nodes[node_ids[0]]
        n2 = self.nodes[node_ids[i]]
        assert n1.__class__ == n2.__class__, "%s %s" % (
            n1.__class__, n2.__class__)
    # Keep the first node, and delete the rest.
    # Make transitions to any node_ids point to the first one.
    node_id = node_ids[0]
    # Collect the parents of every node that will be merged away.
    prev_ids = []
    for nid in node_ids[1:]:
        x = nodeid2parents.get(nid, [])
        prev_ids.extend(x)
    transitions = self.transitions.copy()
    for prev_id in prev_ids:
        # Re-point each parent at the surviving node (avoiding a
        # duplicate edge if it already points there).
        x = transitions[prev_id]
        if node_id not in x:
            x = x + [node_id]
        transitions[prev_id] = x
    # Make the first node point to all the next_nodes of the other
    # node_ids.
    nid0 = node_ids[0]
    for nidi in node_ids[1:]:
        x = transitions.get(nid0, []) + transitions.get(nidi, [])
        # {}.fromkeys de-duplicates; sorted gives a deterministic order.
        x = sorted({}.fromkeys(x))
        transitions[nid0] = x
    x = Network(self.nodes, transitions)
    # Deleting the merged-away nodes also drops their old edges.
    x = x.delete_nodes(node_ids[1:])
    return x
def __cmp__(self, other):
    """Ordering/equality hook comparing two Network objects by content.

    NOTE(review): relies on the Python 2-only cmp() builtin and the
    __cmp__ protocol; this method is never invoked on Python 3.
    """
    if not isinstance(other, Network):
        # Different types: fall back to identity-based ordering.
        return cmp(id(self), id(other))
    # Optimization.  Do some quick comparisons first.
    if id(self) == id(other):
        return 0
    x = cmp(len(self.nodes), len(other.nodes))
    if x != 0:
        return x
    x = cmp(self.transitions, other.transitions)
    if x != 0:
        return x
    # Full (slow) structural comparison of nodes and transitions.
    x1 = [self.nodes, self.transitions]
    x2 = [other.nodes, other.transitions]
    return cmp(x1, x2)
@staticmethod
def __init_from_dict(args):
    """Build a Network from a dict holding 'nodes' and 'transitions'.

    Serialization (e.g. JSON) turns the integer transition keys into
    strings, so convert them back to ints before constructing the
    Network.
    """
    assert 'nodes' in args
    assert 'transitions' in args
    int_keyed = {int(k): v for k, v in args['transitions'].items()}
    return Network(args['nodes'], int_keyed)
def make_network(moduledb, out_data, custom_attributes):
    """Build the inference network for out_data and optimize it.

    Backchains from out_data through moduledb, splits ENUM data nodes
    into atoms, then repeatedly applies the optimizers until the
    network reaches a fixed point.  Returns a Network object.
    """
    import copy

    network = _init_network(moduledb, out_data, custom_attributes)
    # Split the data nodes so that everything is TYPE_ATOM.  Fixes
    # problems in the inference, and also makes the other steps easier
    # to handle.  Carefully build the network back up.
    network = _split_network(network)

    optimizers = [
        # There should not be any cycles, so _OptimizeNoCycles is
        # not included here.
        _OptimizeNoInvalidOutputs(),
        _OptimizeNoDuplicateModules(),
        _OptimizeNoDuplicateData(),
        _OptimizeMergeData1(),
        # _OptimizeMergeData2 is deliberately not applied here.
    ]
    # Keep optimizing until an entire pass leaves the network unchanged.
    previous = None
    num_passes = 0
    while previous != network:
        previous = copy.deepcopy(network)
        for optimizer in optimizers:
            network = optimizer.optimize(network, custom_attributes)
        num_passes += 1
    # NOTE: _complete_network is intentionally not called here; it makes
    # the network really messy and might have to be rewritten.
    return network
def _init_network(moduledb, out_data, custom_attributes):
    """Backwards-chain from out_data to build the initial Network.

    Starting from the requested output, alternately finds the modules
    that can produce each DataNode and the inputs each ModuleNode
    requires, until no new nodes are discovered.  Returns a Network
    object.
    """
    check_moduledb(moduledb)
    if isinstance(out_data, DataType):
        out_data = out_data.output()
    assert isinstance(out_data, DataNode)

    nodes = []        # list of DataNode or ModuleNode objects.
    transitions = {}  # node index -> list of next-node indexes.

    nodes.append(out_data)
    stack = [0]
    seen = {}
    while stack:
        assert len(nodes) < MAX_NETWORK_SIZE, "network too large"
        # Pop the next node off the stack.
        node_id = stack.pop()
        assert node_id < len(nodes)
        node = nodes[node_id]
        # If I've already seen this node, then don't process it again.
        if node_id in seen:
            continue
        seen[node_id] = 1
        if isinstance(node, DataNode):
            # Backwards chain to the previous modules that can
            # generate this data.
            modules = _bc_to_modules(moduledb, node)
            for m in modules:
                nodes.append(m)
                m_id = len(nodes) - 1
                stack.append(m_id)
                transitions[m_id] = transitions.get(m_id, [])
                transitions[m_id].append(node_id)
        elif isinstance(node, ModuleNode):
            # Find the inputs this module needs for every consequent
            # it produces, and chain back from all of them.
            x = [_bc_to_inputs(node, nodes[x], custom_attributes)
                 for x in transitions[node_id]]
            all_inputs = _uniq(_flatten(x))
            for d in all_inputs:
                d_id = _find_same_data(nodes, d)
                if d_id == -1:
                    nodes.append(d)
                    d_id = len(nodes) - 1
                    stack.append(d_id)
                transitions[d_id] = transitions.get(d_id, [])
                transitions[d_id].append(node_id)
        else:
            # Function-call form is valid on both Python 2 and 3 (the
            # original "raise AssertionError, msg" is Python 2 only).
            raise AssertionError("Unknown node type: %s" % node)

    # Remove the duplicates from transitions.  items() instead of the
    # Python 2-only iteritems(); list() makes the in-place value
    # updates safe under Python 3's dict views.
    for nid, next_ids in list(transitions.items()):
        transitions[nid] = _uniq(next_ids)
    network = Network(nodes, transitions)
    return network
def _split_network(network):
    """Split DataNodes with ENUM attributes into atomic DataNodes.

    Inferencing can lead to a situation where a ModuleNode points to a
    DataNode that it can't generate.  E.g.
      trim_adapters -> Fastq.trimmed=["no", "yes"]  (should only be "yes")
    Solution: split such a DataNode into one node per combination of
    attribute values.  _OptimizeNoInvalidOutputs will remove the bad
    links afterwards.  Returns a new Network object.
    """
    import itertools
    nodeid2parents = _make_parents_dict(network)
    to_delete = []
    for node_id in range(len(network.nodes)):
        node = network.nodes[node_id]
        if not isinstance(node, DataNode):
            continue
        # Look for attributes with multiple values.  Once found, replace
        # with all possible individual values.
        attr_names = []   # list of attribute names
        attr_values = []  # list of list of attribute values
        # items() instead of the Python 2-only iteritems() keeps this
        # 2/3 compatible.
        for name, value in node.attributes.items():
            if _get_attribute_type(value) != TYPE_ENUM:
                continue
            attr_names.append(name)
            attr_values.append(value)
        if not attr_names:
            continue
        # Make a new DataNode for each combination of attribute values.
        for values in itertools.product(*attr_values):
            attrs = node.attributes.copy()
            assert len(values) == len(attr_names)
            for name, value in zip(attr_names, values):
                attrs[name] = value
            x = DataNode(node.datatype, **attrs)
            network.nodes.append(x)
            nid = len(network.nodes) - 1
            # Make sure this points to all the children of the previous
            # node.  .get() guards against a node with no outgoing
            # transitions (e.g. the final output node), which would
            # otherwise raise KeyError.
            network.transitions[nid] = list(
                network.transitions.get(node_id, []))
            # Make sure all the parent nodes point to this one.
            for pid in nodeid2parents.get(node_id, []):
                network.transitions[pid].append(nid)
        # Mark the old node for deletion.
        to_delete.append(node_id)
    network = network.delete_nodes(to_delete)
    return network
def _complete_network(network, custom_attributes):
# Sometimes, the network generated by backchaining may be missing
# some links. This function will search for missing links and add
# them back into the network. Returns a new Network object.
#
# Example:
# 1. PSF (preprocess=unknown) -> rank_genes_by_class_neighbors ->
# GeneListFile
# preprocess assigned to unknown because it is the default
# value for PSF files.
# 2. During inferencing, PSF (preprocess=illumina) is created.
# It does not point to rank_genes_by_class_neighbors--it
# points to another module.
# 3. complete_network will add link:
# PSF (preprocess=illumina) -> rank_genes_by_class_neighbors
#
# This occurs because of the optimization we made where
# backchaining created antecedents with default values. If the
# antecedents countained all possible values, this would not be
# necessary.
import copy
import itertools
debug_print("Completing network.")
network = copy.deepcopy(network)
nodeid2parents = _make_parents_dict(network)
ancestors = _make_ancestor_dict(network)
descendents = _make_descendent_dict(network)
# For each DataNode object, check to see if it can be the
# antecedent of any ModuleNode objects.
data_ids = [x for x in range(len(network.nodes))
if isinstance(network.nodes[x], DataNode)]
module_ids = [x for x in range(len(network.nodes))
if isinstance(network.nodes[x], ModuleNode)]
for x in itertools.product(data_ids, module_ids):
input_id, module_id = x
# If data_id already points to module_id, then ignore
# this.
if module_id in network.transitions.get(input_id, []):
continue
# If this node is not a DataType that the module takes, then
# don't bother checking.
found = False
for dt in network.nodes[module_id].in_datatypes:
if network.nodes[input_id].datatype.name == dt.name:
found = True
break
if not found:
continue
# Don't add a link from data_id to module_id if it would
# create a cycle.
if module_id in ancestors[input_id]:
#debug_print("Skipping DataNode %d -> ModuleNode %d (cycle)." % (
# input_id, module_id))
continue
# Since modules can take multiple inputs, we need to combine
# input_id with all previous input IDs and try all possible
# combinations.
#x = _get_parents_of(network, module_id)
x = nodeid2parents.get(module_id, [])
combined_ids = x + [input_id]
# Find combinations of inputs that are compatible with the
# network.
combos = _bc_to_input_ids(
network, module_id, custom_attributes, all_input_ids=combined_ids,
nodeid2parents=nodeid2parents)
# Add the new transitions.
added = []
for id_ in itertools.chain.from_iterable(combos):
# Probably don't need to search through. All the id_'s,
# except for input_id, is already in a parent of this
# node.
assert id_ in network.transitions
if module_id in network.transitions[id_]:
continue
# Add id_ -> module_id.
network.transitions[id_].append(module_id)
added.append(id_)
debug_print(
"Completing DataNode %s [%d] -> ModuleNode %s [%d]." % (
network.nodes[id_].datatype.name, id_,
network.nodes[module_id].name, | |
98.81 SOURCE3 2 2.1583
cc-ss-ss 74.9 93.80 CORR_SOURCE5 31 0.9858
cd-ss-cd 78.0 90.24 SOURCE3_SOURCE5 652 1.5043
cd-ss-n 96.8 93.58 SOURCE3_SOURCE5 6 2.0175
cd-ss-na 93.8 99.33 SOURCE3 18 2.5847
cd-ss-nd 99.5 93.22 CORR_SOURCE5 25 1.5563
cd-ss-os 94.7 98.81 SOURCE3 2
cd-ss-ss 74.9 93.80 CORR_SOURCE5 31 0.9858
cl-ss-cl 90.1 103.37 SOURCE3 1
cx-ss-cx 102.2 48.30 SOURCE2 1
f -ss-f 129.7 98.30 SOURCE2 1
f -ss-ss 90.1 108.30 SOURCE2 1
i -ss-i 72.6 106.29 SOURCE3 1
n1-ss-n1 128.8 96.96 HF/6-31G* 1
n2-ss-n2 125.3 96.75 SOURCE3 1
n3-ss-n3 117.1 102.34 SOURCE3 1
n4-ss-n4 112.0 101.19 SOURCE3 1
na-ss-na 116.2 102.81 SOURCE3 1
nc-ss-nc 126.8 97.99 CORR_SOURCE5 29 0.5000
nd-ss-nd 126.8 97.99 CORR_SOURCE5 29 0.5000
nh-ss-nh 115.0 107.89 SOURCE3 1
n -ss-n 116.5 103.10 SOURCE3 1
no-ss-no 108.2 106.43 SOURCE3 1
oh-ss-oh 118.4 104.25 SOURCE3 1
o -ss-o 124.0 119.30 SOURCE2 1
o -ss-p5 114.3 106.41 SOURCE3 1
o -ss-s6 89.5 105.39 SOURCE3 1
os-ss-os 118.0 102.99 SOURCE3 1
o -ss-ss 88.2 112.70 SOURCE2 1
p2-ss-p2 127.8 99.52 SOURCE3 1
p3-ss-p3 117.2 101.67 SOURCE3 1
p5-ss-p5 126.6 87.37 SOURCE3_SOURCE5 11 1.2491
s4-ss-s4 71.7 96.08 SOURCE3 1
s4-ss-s6 70.6 101.26 SOURCE3 1
s6-ss-s6 71.2 101.81 SOURCE3 1
sh-ss-sh 71.0 107.54 SOURCE3 1
sh-ss-ss 71.2 106.53 SOURCE3 1
s -ss-s 67.9 115.04 SOURCE3 1
ss-ss-ss 70.7 107.93 SOURCE4_SOURCE5 72 1.6368
c3-sx-ca 72.2 96.64 SOURCE4_SOURCE5 41 0.4942
c3-sx-cc 73.1 95.18 SOURCE4_SOURCE5 41 0.6549
c3-sx-ce 72.9 95.29 SOURCE3_SOURCE5 10 0.5723
c3-sx-cf 72.9 95.29 SOURCE3_SOURCE5 7 0.8172
c3-sx-ne 93.4 90.06 SOURCE3 5 1.9627
c3-sx-nf 93.4 90.06 SOURCE3 5
c3-sx-o 91.9 107.52 SOURCE3_SOURCE5 84 0.7996
c3-sx-pe 91.5 94.32 SOURCE3 7 0.5547
c3-sx-pf 91.5 94.32 SOURCE3 7
c3-sx-px 88.3 96.46 SOURCE3 3 1.3351
c3-sx-py 88.2 95.67 SOURCE3 1
c3-sx-sx 67.2 91.47 SOURCE3 4 1.9919
c3-sx-sy 68.9 95.47 SOURCE3 3 2.8422
ca-sx-ca 72.9 95.75 SOURCE3_SOURCE5 14 1.8607
ca-sx-o 92.7 107.15 SOURCE4_SOURCE5 86 0.9103
c -sx-c3 72.5 92.71 SOURCE3 3 0.3095
c -sx-c 74.1 86.85 SOURCE3 1
cc-sx-o 94.3 104.81 SOURCE4_SOURCE5 45 1.5594
ce-sx-ce 73.5 94.96 SOURCE3 1
ce-sx-o 92.5 108.23 SOURCE3_SOURCE5 27 0.8358
cf-sx-cf 73.5 94.96 SOURCE3 1
cf-sx-o 92.5 108.23 SOURCE3_SOURCE5 22 0.9547
c -sx-o 90.9 106.17 SOURCE3 5 0.9477
ne-sx-ne 107.6 106.45 SOURCE3_SOURCE5 5 1.4815
ne-sx-o 114.1 109.81 SOURCE3_SOURCE5 13 1.0385
nf-sx-nf 107.6 106.45 SOURCE3_SOURCE5 5 1.4815
nf-sx-o 114.1 109.81 SOURCE3_SOURCE5 6 0.5536
o -sx-pe 111.8 106.43 SOURCE3 9 2.8345
o -sx-pf 111.8 106.43 SOURCE3 9
o -sx-px 109.1 104.77 SOURCE3 3 1.9810
o -sx-py 106.2 109.13 SOURCE3 7 5.6840
o -sx-sx 79.8 104.65 SOURCE3 6 3.0524
o -sx-sy 85.1 103.41 SOURCE3 5 0.9618
pe-sx-pe 120.1 92.62 SOURCE3 1
pf-sx-pf 120.1 92.62 SOURCE3 1
py-sx-py 133.3 69.23 SOURCE3 3 17.4143
sx-sx-sx 69.1 84.90 SOURCE3 1
sy-sx-sy 69.8 93.52 SOURCE3 1
c3-sy-ca 70.9 103.93 SOURCE4_SOURCE5 136 0.4172
c3-sy-cc 71.8 101.95 SOURCE4_SOURCE5 32 1.4362
c3-sy-ce 71.1 103.53 SOURCE3_SOURCE5 11 1.3594
c3-sy-cf 71.1 103.53 SOURCE3_SOURCE5 8 1.7429
c3-sy-ne 92.4 102.19 SOURCE3_SOURCE5 11 3.1966
c3-sy-nf 92.4 102.19 SOURCE3_SOURCE5 6 2.3703
c3-sy-o 93.8 107.85 SOURCE3_SOURCE5 283 0.5690
c3-sy-pe 85.5 106.03 SOURCE3 6 2.6117
c3-sy-pf 85.5 106.03 SOURCE3 6
c3-sy-px 85.4 103.62 SOURCE3 3 0.7078
c3-sy-py 87.5 103.39 SOURCE3 3 0.4563
c3-sy-sx 66.1 104.64 SOURCE3 3 4.6276
c3-sy-sy 67.5 100.78 SOURCE3 4 1.1633
ca-sy-ca 71.1 104.44 SOURCE4_SOURCE5 55 1.7845
ca-sy-cc 71.0 105.09 SOURCE4_SOURCE5 10 0.3628
ca-sy-n3 92.2 102.44 SOURCE4_SOURCE5 407 1.1038
ca-sy-n 90.5 105.37 SOURCE4_SOURCE5 122 1.2203
ca-sy-ne 92.5 103.01 SOURCE4_SOURCE5 36 2.1672
ca-sy-nh 90.5 105.50 SOURCE4_SOURCE5 205 1.5936
ca-sy-o 94.3 108.35 SOURCE3_SOURCE5 1362 0.6985
ca-sy-oh 93.8 101.30 SOURCE4_SOURCE5 94 0.8210
ca-sy-os 96.8 92.98 SOURCE3 1
c -sy-c3 70.4 101.25 SOURCE3 3 1.1850
c -sy-c 69.9 99.81 SOURCE3 1
cc-sy-n3 92.4 102.53 CORR_SOURCE5 35 0.5689
cc-sy-o 94.8 107.89 CORR_SOURCE5 130 0.8911
cd-sy-n3 92.4 102.53 CORR_SOURCE5 35 0.5689
cd-sy-nh 94.5 97.20 SOURCE4_SOURCE5 12 0.2429
cd-sy-o 94.8 107.89 CORR_SOURCE5 130 0.8911
ce-sy-ce 71.8 102.78 SOURCE3 1
ce-sy-o 94.4 108.38 SOURCE3_SOURCE5 66 0.9753
cf-sy-cf 71.8 102.78 SOURCE3 1
cf-sy-o 94.4 108.38 SOURCE3_SOURCE5 56 1.0516
c -sy-o 91.7 107.48 SOURCE3_SOURCE5 16 0.7996
n2-sy-o 121.9 123.53 SOURCE4 6 1.2388
n3-sy-ne 120.0 101.93 SOURCE4_SOURCE5 15 1.4395
n3-sy-o 123.4 107.13 SOURCE4_SOURCE5 863 1.1609
na-sy-na 119.4 98.04 SOURCE3 1
nc-sy-nc 132.6 98.04 SOURCE3 2
nd-sy-nd 132.6 98.04 SOURCE3 2
ne-sy-ne 122.9 98.62 SOURCE3 1
ne-sy-o 123.2 109.65 SOURCE3_SOURCE5 101 1.9902
nf-sy-nf 122.9 98.62 SOURCE3 1
nf-sy-o 123.2 109.65 SOURCE3_SOURCE5 87 1.9451
nh-sy-o 123.1 106.23 SOURCE4_SOURCE5 319 1.7353
n -sy-o 122.2 107.54 SOURCE4_SOURCE5 155 1.8699
o -sy-o 126.4 121.41 SOURCE3_SOURCE5 734 0.8526
o -sy-oh 126.0 106.68 SOURCE3_SOURCE5 166 0.5588
o -sy-os 123.1 107.52 SOURCE4_SOURCE5 38 1.6656
o -sy-pe 109.5 106.90 SOURCE3 12 1.4524
o -sy-pf 109.5 106.90 SOURCE3 12
o -sy-px 108.1 106.17 SOURCE3 6 0.7059
o -sy-py 111.2 106.67 SOURCE3 10 0.6478
o -sy-sx 84.0 106.33 SOURCE3 10 2.0456
o -sy-sy 84.2 106.19 SOURCE3 12 0.1754
py-sy-py 112.3 104.49 SOURCE3 1
sx-sy-sx 66.8 101.99 SOURCE3 1
sy-sy-sy 66.5 103.29 SOURCE3 1
c2-c1-cf 60.0 179.05 SOURCE4_SOURCE5 9 0.3913
c3-c1-ch 57.7 178.43 SOURCE4_SOURCE5 95 0.5682
nf-c1-s 73.6 175.82 SOURCE4_SOURCE5 15 0.2067
br-c2-cf 64.3 121.53 SOURCE4_SOURCE5 11 0.7009
cd-c2-h4 49.8 119.85 SOURCE4_SOURCE5 16 0.8001
cd-c2-nh 86.6 123.12 SOURCE4_SOURCE5 17 1.2171
cd-c2-o 91.4 123.59 SOURCE4_SOURCE5 6 0.0560
cf-c2-cl 72.1 123.47 SOURCE4_SOURCE5 30 1.0225
cf-c2-h4 49.7 122.31 SOURCE4_SOURCE5 145 1.6214
cf-c2-na 86.1 124.17 SOURCE4_SOURCE5 6 1.9423
cf-c2-nh 87.8 120.71 SOURCE4_SOURCE5 150 2.3947
cf-c2-no 86.1 119.65 SOURCE4_SOURCE5 5 0.9817
cf-c2-o 92.0 123.37 SOURCE4_SOURCE5 9 1.0481
cf-c2-oh 88.6 123.13 SOURCE4_SOURCE5 62 1.7479
cf-c2-os 88.0 122.80 SOURCE4_SOURCE5 98 2.2743
h4-c2-nf 64.9 119.51 SOURCE4_SOURCE5 42 1.6302
h5-c2-nf 64.7 119.85 SOURCE4_SOURCE5 27 1.3790
nf-c2-os 114.2 118.76 SOURCE4 5
nf-c2-ss 82.2 120.51 SOURCE4_SOURCE5 23 2.4188
n -c2-nf 109.3 125.34 SOURCE4_SOURCE5 15 1.5591
ca-c3-cf 65.6 112.21 SOURCE4_SOURCE5 93 1.2595
cd-c3-cx 65.7 112.40 5/2017 1
c -c3-cf 65.5 111.89 SOURCE4_SOURCE5 59 1.5769
cd-c3-hx 47.6 111.01 SOURCE4_SOURCE5 10 0.7123
cd-c3-n2 84.6 110.31 SOURCE4_SOURCE5 21 0.5628
cd-c3-n4 81.5 115.58 SOURCE4_SOURCE5 6 1.1723
cd-c3-na 83.6 113.15 SOURCE4_SOURCE5 10 0.6466
cd-c3-p5 79.5 116.23 SOURCE4_SOURCE5 6 0.7766
cf-c3-cf 65.8 111.47 SOURCE4_SOURCE5 35 0.5985
cf-c3-n 84.3 110.22 SOURCE4_SOURCE5 10 1.0919
cf-c3-oh 85.0 111.19 SOURCE4_SOURCE5 57 1.5702
cf-c3-os 85.4 109.50 SOURCE4_SOURCE5 55 1.8883
cf-c3-ss 63.3 110.72 SOURCE4_SOURCE5 12 1.7025
cd-ca-cq 66.0 124.30 SOURCE4_SOURCE5 10 0.6423
cf-ca-na 84.1 119.92 SOURCE4_SOURCE5 29 0.5242
ch-ca-cq 67.3 121.53 SOURCE4_SOURCE5 12 0.1831
cl-ca-cq 71.7 120.39 SOURCE4_SOURCE5 34 0.5366
cq-ca-f 88.8 119.42 SOURCE4_SOURCE5 30 0.2799
cq-ca-h4 48.4 120.09 SOURCE4_SOURCE5 35 0.4098
cq-ca-na 90.7 108.79 SOURCE4_SOURCE5 349 0.5003
cq-ca-nb 86.4 123.58 SOURCE4_SOURCE5 79 0.8527
cq-ca-nh 85.7 121.56 SOURCE4_SOURCE5 19 0.6123
cq-ca-oh 86.6 120.85 SOURCE4_SOURCE5 29 1.4592
cq-ca-ss 66.0 111.17 SOURCE4_SOURCE5 16 2.4162
ca-c -nf 85.3 114.71 SOURCE4_SOURCE5 9 0.7464
br-cd-c 65.2 116.28 SOURCE4_SOURCE5 24 1.3164
br-cd-cd 63.4 124.05 SOURCE4_SOURCE5 23 1.9356
br-cd-cc 63.7 124.23 SOURCE4_SOURCE5 84 2.2845
br-cd-na 80.6 121.58 SOURCE4_SOURCE5 13 0.9881
ca-cd-cf 64.3 127.01 SOURCE4_SOURCE5 27 1.6430
ca-cd-nh 84.3 122.13 SOURCE4_SOURCE5 11 2.0536
cd-c -cf 66.4 115.57 SOURCE4_SOURCE5 8 1.2130
cd-cd-f 88.4 119.19 SOURCE4_SOURCE5 19 1.0481
c -cd-ch 67.0 117.88 SOURCE4_SOURCE5 19 0.6396
cd-cd-sy 61.1 128.25 SOURCE4_SOURCE5 12 0.8482
cc-cd-f 89.6 121.19 SOURCE4_SOURCE5 54 0.6386
cc-cd-no 82.9 128.69 SOURCE4_SOURCE5 197 1.4212
c -cd-f 87.8 116.98 SOURCE4_SOURCE5 33 0.4384
ch-cd-na 84.9 122.61 SOURCE4_SOURCE5 7 1.0826
ch-cd-ss 63.8 120.73 SOURCE4_SOURCE5 15 0.9326
cd-c -h4 47.6 114.83 SOURCE4_SOURCE5 20 0.4400
cl-cd-na 90.5 121.12 SOURCE4_SOURCE5 25 0.9015
cl-cd-ss 71.9 119.85 SOURCE4_SOURCE5 16 0.8775
c -cd-nf 84.5 119.88 SOURCE4 6
cd-c -s 64.0 126.28 SOURCE4_SOURCE5 57 2.2083
cd-c -ss 64.4 112.40 SOURCE4_SOURCE5 32 1.0830
cx-cd-nc 85.6 119.81 5/2017 2
cx-cd-os 85.4 118.07 SOURCE4_SOURCE5 13 0.0898
cc-c -cx 65.4 117.59 5/2017 1
cc-c -nc 86.5 113.75 SOURCE4_SOURCE5 14 0.0860
cf-c -cx 65.0 117.91 5/2017 13 0.7631
cf-c -h4 47.2 114.89 SOURCE4_SOURCE5 94 0.4993
cf-c -ss 64.8 110.49 SOURCE4_SOURCE5 8 0.5728
na-cd-no 105.3 124.59 SOURCE4_SOURCE5 114 0.8160
na-cd-oh 111.7 117.48 SOURCE4_SOURCE5 23 1.0304
na-cd-sx 79.7 117.02 SOURCE4_SOURCE5 19 0.3766
na-cd-sy 79.5 120.46 SOURCE4_SOURCE5 8 1.7069
nd-cd-no 106.9 121.73 SOURCE4_SOURCE5 10 0.8384
nc-cd-nc 110.8 128.07 SOURCE4_SOURCE5 10 0.4198
nc-cd-nf 107.8 129.01 SOURCE4_SOURCE5 13 1.6879
nc-cd-no 108.2 122.75 SOURCE4_SOURCE5 64 0.2909
nc-cd-sh 79.2 124.97 SOURCE4_SOURCE5 13 0.8081
nc-cd-sx 76.8 127.74 SOURCE4_SOURCE5 19 0.3234
nc-cd-sy 79.3 123.03 SOURCE4_SOURCE5 20 1.2273
nf-cd-ss 81.7 117.03 SOURCE4_SOURCE5 10 0.2421
n -cd-n2 112.9 119.42 SOURCE4_SOURCE5 13 0.1189
no-cd-os 109.1 117.55 SOURCE4_SOURCE5 82 0.2764
no-cd-ss 79.7 121.06 SOURCE4_SOURCE5 23 0.2526
ca-cc-cf 66.7 124.90 SOURCE4_SOURCE5 32 1.6591
ca-cc-na 83.6 123.45 SOURCE4 39
cd-cc-cg 67.1 125.79 SOURCE4_SOURCE5 54 1.7418
cd-cc-cy 66.4 121.68 5/2017 4 2.0175
cd-cc-nd 88.1 123.82 SOURCE4_SOURCE5 14 0.3678
cc-cc-cy 64.6 124.39 5/2017 2 0.0292
cf-cc-nc 86.6 123.98 SOURCE4_SOURCE5 5 2.4219
c -cc-h4 47.1 118.19 SOURCE4_SOURCE5 8 0.2226
na-cc-nh 110.8 117.28 SOURCE4_SOURCE5 54 1.7570
na-cc-ss 83.7 111.46 SOURCE4 20
nc-cc-nc 107.6 125.70 SOURCE4_SOURCE5 18 0.6787
oh-cc-os 115.4 111.61 SOURCE4_SOURCE5 6 1.1909
c2-cf-cl 72.1 119.76 SOURCE4_SOURCE5 38 1.3369
c2-cf-h4 49.2 124.55 SOURCE4_SOURCE5 32 1.8945
c2-cf-n1 91.3 118.23 SOURCE4_SOURCE5 11 1.2780
c2-cf-na 87.2 119.19 SOURCE4_SOURCE5 5 0.8452
c2-cf-oh 88.0 123.70 SOURCE4_SOURCE5 17 1.7138
c3-cf-ch 66.0 117.22 SOURCE4_SOURCE5 26 1.7890
c3-cf-ne 84.4 120.68 SOURCE4_SOURCE5 7 2.0560
c3-cf-nh 82.7 119.56 SOURCE4_SOURCE5 5 1.0524
ca-cf-cf 65.7 119.54 SOURCE4_SOURCE5 18 1.9239
ca-cf-cl 72.2 114.59 SOURCE4_SOURCE5 8 0.9719
ca-cf-h4 47.0 116.99 SOURCE4_SOURCE5 181 1.0407
ca-cf-nh 85.5 115.58 SOURCE4_SOURCE5 147 1.1060
ca-cf-os 85.8 115.91 SOURCE4_SOURCE5 17 1.5899
ca-cf-ss 63.4 117.52 SOURCE4_SOURCE5 9 1.2901
c -cf-ca 65.5 118.28 SOURCE4_SOURCE5 17 1.7879
cd-cf-cc 65.3 130.61 SOURCE4_SOURCE5 19 0.8270
c -cf-cf 65.2 120.98 SOURCE4_SOURCE5 37 2.3876
c -cf-ch 66.5 118.42 SOURCE4_SOURCE5 34 1.0602
cd-cf-h4 47.9 115.68 SOURCE4_SOURCE5 48 0.8279
c -cf-cl 71.8 115.47 SOURCE4_SOURCE5 19 1.2383
cd-cf-nh 85.3 118.05 SOURCE4_SOURCE5 13 1.6005
c -cf-cy 74.7 88.44 SOURCE4_SOURCE5 34 1.2419
cf-cf-cl 71.6 117.22 SOURCE4_SOURCE5 | |
"""
genome_mutation_test.py
Test gene-specific operations.
"""
import os
import unittest
import numpy as np
from config import Config
from population.utils.genome_util.genes import ConnectionGene, GruNodeGene, OutputNodeGene, SimpleNodeGene
from utils.dictionary import *
def get_connection_gene(key, config):
    """Create a ConnectionGene for *key*, configured by *config*."""
    return ConnectionGene(key, config)
def get_gru_node_gene(key, config):
    """Create a GruNodeGene for *key* with a fixed single-input wiring
    (one active input key, two in the full set)."""
    return GruNodeGene(key, config, input_keys=[-1], input_keys_full=[-1, -2])
def get_output_node_gene(key, config):
    """Create an OutputNodeGene for *key*, configured by *config*."""
    return OutputNodeGene(key, config)
def get_simple_node_gene(key, config):
    """Create a SimpleNodeGene for *key*, configured by *config*."""
    return SimpleNodeGene(key, config)
class SimpleNode(unittest.TestCase):
    """Test the SimpleNodeGene's mutation operations."""

    def test_activation(self):
        """> Test if activation changes during mutation."""
        # Folder must be root to load in make_net properly.
        # os.path.basename works on every platform; the original
        # os.getcwd().split('\\') only recognized Windows separators.
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # After mutations, activation is always one of the options
        cfg = Config().genome
        cfg.activation_default = "a"
        cfg.activation_mutate_rate = 1.0
        OPTIONS = {"a": 1, "b": 2, "c": 3}
        cfg.activation_options = OPTIONS
        gene = get_simple_node_gene(0, cfg)
        changed = False
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.activation in OPTIONS.keys())
            if gene.activation != "a": changed = True
        self.assertTrue(changed)  # Almost impossible that this failed

        # Set mutation rate to 0, activation should not mutate
        cfg.activation_default = "a"
        cfg.activation_mutate_rate = 0.0
        cfg.activation_options = OPTIONS
        gene = get_simple_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.activation == 'a')

    def test_aggregation(self):
        """> Test if aggregation changes during mutation."""
        # Folder must be root to load in make_net properly.
        # Platform-independent check (see test_activation).
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # After mutations, aggregation is always one of the options
        cfg = Config().genome
        cfg.aggregation_default = "a"
        cfg.aggregation_mutate_rate = 1.0
        OPTIONS = {"a": 1, "b": 2, "c": 3}
        cfg.aggregation_options = OPTIONS
        gene = get_simple_node_gene(0, cfg)
        changed = False
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.aggregation in OPTIONS.keys())
            if gene.aggregation != "a": changed = True
        self.assertTrue(changed)  # Almost impossible that this failed

        # Set mutation rate to 0, aggregation should not mutate
        cfg.aggregation_default = "a"
        cfg.aggregation_mutate_rate = 0.0
        cfg.aggregation_options = OPTIONS
        gene = get_simple_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.aggregation == 'a')

    def test_bias(self):
        """> Test if the bias remains inside its boundaries during mutation."""
        # Folder must be root to load in make_net properly.
        # Platform-independent check (see test_activation).
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # After mutations, bias remains inside its boundary
        cfg = Config().genome
        cfg.bias_mutate_rate = 0.5
        cfg.bias_replace_rate = 0.5
        cfg.bias_min_value = -0.1
        cfg.bias_max_value = 0.1
        gene = get_simple_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(-0.1 <= gene.bias <= 0.1)

        # Set mutation rate to 0, no change should happen
        cfg.bias_mutate_rate = 0
        cfg.bias_replace_rate = 0
        gene = get_simple_node_gene(0, cfg)
        init_bias = gene.bias
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.bias == init_bias)

        # Set mutation power to 0, no change should happen
        cfg.bias_mutate_rate = 1
        cfg.bias_replace_rate = 0
        cfg.bias_mutate_power = 0
        gene = get_simple_node_gene(0, cfg)
        init_bias = gene.bias
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.bias == init_bias)
class OutputNode(unittest.TestCase):
    """Test the OutputNodeGene's mutation operations."""

    def test_activation(self):
        """> Test if activation remains tanh after mutation."""
        # Folder must be root to load in make_net properly.
        # os.path.basename works on every platform; the original
        # os.getcwd().split('\\') only recognized Windows separators.
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # Even with mutation enabled, an output node's activation is
        # pinned to tanh.
        cfg = Config().genome
        cfg.activation_default = "a"
        cfg.activation_mutate_rate = 1.0
        OPTIONS = {"a": 1, "b": 2, "c": 3}
        cfg.activation_options = OPTIONS
        gene = get_output_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertEqual(gene.activation, D_TANH)

    def test_aggregation(self):
        """> Test if aggregation changes during mutation."""
        # Folder must be root to load in make_net properly.
        # Platform-independent check (see test_activation).
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # After mutations, aggregation is always one of the options
        cfg = Config().genome
        cfg.aggregation_default = "a"
        cfg.aggregation_mutate_rate = 1.0
        OPTIONS = {"a": 1, "b": 2, "c": 3}
        cfg.aggregation_options = OPTIONS
        gene = get_output_node_gene(0, cfg)
        changed = False
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.aggregation in OPTIONS.keys())
            if gene.aggregation != "a": changed = True
        self.assertTrue(changed)  # Almost impossible that this failed

        # Set mutation rate to 0, aggregation should not mutate
        cfg.aggregation_default = "a"
        cfg.aggregation_mutate_rate = 0.0
        cfg.aggregation_options = OPTIONS
        gene = get_output_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.aggregation == 'a')

    def test_bias(self):
        """> Test if the bias remains inside its boundaries during mutation."""
        # Folder must be root to load in make_net properly.
        # Platform-independent check (see test_activation).
        if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

        # After mutations, bias remains inside its boundary
        cfg = Config().genome
        cfg.bias_mutate_rate = 0.5
        cfg.bias_replace_rate = 0.5
        cfg.bias_min_value = -0.1
        cfg.bias_max_value = 0.1
        gene = get_output_node_gene(0, cfg)
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(-0.1 <= gene.bias <= 0.1)

        # Set mutation rate to 0, no change should happen
        cfg.bias_mutate_rate = 0
        cfg.bias_replace_rate = 0
        gene = get_output_node_gene(0, cfg)
        init_bias = gene.bias
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.bias == init_bias)

        # Set mutation power to 0, no change should happen
        cfg.bias_mutate_rate = 1
        cfg.bias_replace_rate = 0
        cfg.bias_mutate_power = 0
        gene = get_output_node_gene(0, cfg)
        init_bias = gene.bias
        for _ in range(100):
            gene.mutate(cfg)
            self.assertTrue(gene.bias == init_bias)
class GruNode(unittest.TestCase):
"""Test the GruNodeGene's mutation operations."""
def test_activation(self):
    """> Test if activation changes during mutation."""
    # Folder must be root to load in make_net properly.
    # os.path.basename works on every platform; the original
    # os.getcwd().split('\\') only recognized Windows separators.
    if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

    # After mutations, activation is always one of the options
    cfg = Config().genome
    cfg.activation_default = "a"
    cfg.activation_mutate_rate = 1.0
    OPTIONS = {"a": 1, "b": 2, "c": 3}
    cfg.activation_options = OPTIONS
    gene = get_gru_node_gene(0, cfg)
    changed = False
    for _ in range(100):
        gene.mutate(cfg)
        self.assertTrue(gene.activation in OPTIONS.keys())
        if gene.activation != "a": changed = True
    self.assertTrue(changed)  # Almost impossible that this failed

    # Set mutation rate to 0, activation should not mutate
    cfg.activation_default = "a"
    cfg.activation_mutate_rate = 0.0
    cfg.activation_options = OPTIONS
    gene = get_gru_node_gene(0, cfg)
    for _ in range(100):
        gene.mutate(cfg)
        self.assertTrue(gene.activation == 'a')
def test_bias(self):
    """> Test if bias is left unchanged during mutation."""
    # Folder must be root to load in make_net properly.
    # os.path.basename works on every platform; the original
    # os.getcwd().split('\\') only recognized Windows separators.
    if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

    # A GRU node's scalar bias stays zero regardless of mutation.
    cfg = Config().genome
    cfg.bias_mutate_rate = 0.5
    cfg.bias_replace_rate = 0.5
    gene = get_gru_node_gene(0, cfg)
    for _ in range(100):
        gene.mutate(cfg)
        self.assertEqual(gene.bias, 0)
def test_bias_hh(self):
    """> Test if bias_hh behaves as expected"""
    # Folder must be root to load in make_net properly.
    # os.path.basename works on every platform; the original
    # os.getcwd().split('\\') only recognized Windows separators.
    if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

    # After mutations, bias_hh's values remain inside the set boundary
    cfg = Config().genome
    cfg.gru_mutate_rate = 0.5
    cfg.gru_replace_rate = 0.5
    cfg.gru_min_value = -0.1
    cfg.gru_max_value = 0.1
    gene = get_gru_node_gene(0, cfg)
    changed = False
    init_bias_hh = gene.bias_hh.copy()
    for _ in range(100):
        gene.mutate(cfg)
        for value in gene.bias_hh:
            self.assertTrue(-0.1 <= value <= 0.1)
        if np.linalg.norm(gene.bias_hh - init_bias_hh) > 0: changed = True
    self.assertTrue(changed)

    # Set mutation rate to 0, no change should happen
    cfg.gru_mutate_rate = 0
    cfg.gru_replace_rate = 0
    gene = get_gru_node_gene(0, cfg)
    init_bias_hh = gene.bias_hh.copy()
    for _ in range(100):
        gene.mutate(cfg)
    self.assertEqual(np.linalg.norm(gene.bias_hh - init_bias_hh), 0)

    # Set mutation power to 0, no change should happen
    cfg.gru_mutate_rate = 1
    cfg.gru_replace_rate = 0
    cfg.gru_mutate_power = 0
    gene = get_gru_node_gene(0, cfg)
    init_bias_hh = gene.bias_hh.copy()
    for _ in range(100):
        gene.mutate(cfg)
    self.assertEqual(np.linalg.norm(gene.bias_hh - init_bias_hh), 0)
def test_bias_ih(self):
    """> Test if bias_ih behaves as expected"""
    # Folder must be root to load in make_net properly.
    # os.path.basename works on every platform; the original
    # os.getcwd().split('\\') only recognized Windows separators.
    if os.path.basename(os.getcwd()) == 'tests': os.chdir('..')

    # After mutations, bias_ih's values remain inside the set boundary
    cfg = Config().genome
    cfg.gru_mutate_rate = 0.5
    cfg.gru_replace_rate = 0.5
    cfg.gru_min_value = -0.1
    cfg.gru_max_value = 0.1
    gene = get_gru_node_gene(0, cfg)
    changed = False
    init_bias_ih = gene.bias_ih.copy()
    for _ in range(100):
        gene.mutate(cfg)
        for value in gene.bias_ih:
            self.assertTrue(-0.1 <= value <= 0.1)
        if np.linalg.norm(gene.bias_ih - init_bias_ih) > 0: changed = True
    self.assertTrue(changed)

    # Set mutation rate to 0, no change should happen
    cfg.gru_mutate_rate = 0
    cfg.gru_replace_rate = 0
    gene = get_gru_node_gene(0, cfg)
    init_bias_ih = gene.bias_ih.copy()
    for _ in range(100):
        gene.mutate(cfg)
    self.assertEqual(np.linalg.norm(gene.bias_ih - init_bias_ih), 0)

    # Set mutation power to 0, no change should happen
    cfg.gru_mutate_rate = 1
    cfg.gru_replace_rate = 0
    cfg.gru_mutate_power = 0
    gene = get_gru_node_gene(0, cfg)
    init_bias_ih = gene.bias_ih.copy()
    for _ in range(100):
        gene.mutate(cfg)
    self.assertEqual(np.linalg.norm(gene.bias_ih - init_bias_ih), 0)
def test_weight_hh(self):
"""> Test if weight_hh behaves as expected"""
# Folder must be root to load in make_net properly
if os.getcwd().split('\\')[-1] == 'tests': os.chdir('..')
# After mutations, weight_hh's values remain inside the set boundary
cfg = Config().genome
cfg.gru_mutate_rate = 0.5
cfg.gru_replace_rate = 0.5
cfg.gru_min_value = -0.1
cfg.gru_max_value = 0.1
| |
os.path.split(file_name)[-1]
mol = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
f = self.cmdForms['default']
address = f.descr.entryByName['web service address']['widget'].get()
address = address.strip()
global APBS_ssl
if address.find('https://') != 0:
#first check to see if APBS Web Services is up and running
import urllib
opener = urllib.FancyURLopener({})
try:
servlet = opener.open(address)
except IOError:
self.errorMsg=address+" could not be found"
self.errorMsg += "\nPlease make sure that server is up and running"
self.errorMsg += "\nFor more info on APBS Web Services visit http://www.nbcr.net/services"
self.showForm('error')
return
APBS_ssl = False
else:
from mgltools.web.services.SecuritymyproxyloginImplService_services import \
loginUserMyProxyRequestWrapper, \
SecuritymyproxyloginImplServiceLocator
gamaLoginLocator = SecuritymyproxyloginImplServiceLocator()
gamaLoginService = gamaLoginLocator.getSecuritymyproxyloginImpl(
ssl=1,transport=httplib.HTTPSConnection)
req = loginUserMyProxyRequestWrapper()
username = self.cmdForms['default'].descr.\
entryByName['UserName_Entry']['widget'].get()
passwd = self.cmdForms['default'].descr.\
entryByName['Password_Entry']['widget'].get()
req._username = username
req._passwd = <PASSWORD>
resp = gamaLoginService.loginUserMyProxy(req)
f = open(APBS_proxy, "w")
f.write(resp._loginUserMyProxyReturn)
f.close()
APBS_ssl = True
if self.RememberLogin_var.get():
file = open(self.rc_apbs,'w')
user = self.cmdForms['default'].descr.entryByName\
['UserName_Entry']['widget'].get()
passwd = self.cmdForms['default'].descr.entryByName\
['Password_Entry']['widget'].get()
file.write("User:%s\nPassword:%<PASSWORD>"%(user,passwd))
self.params.projectFolder=os.path.join(os.getcwd(),"apbs-"+mol.name)
from thread import start_new_thread
if self.params.calculationType == 'Binding energy':
file_name, ext = os.path.splitext(self.params.molecule2Path)
tmp_mol_name = os.path.split(file_name)[-1]
mol2 = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
file_name, ext = os.path.splitext(self.params.complexPath)
tmp_mol_name = os.path.split(file_name)[-1]
_complex = self.vf.getMolFromName(tmp_mol_name.replace('-','_'))
self.params.projectFolder += "_" + mol2.name + "_"+ _complex.name
if not os.path.exists(self.params.projectFolder):
os.mkdir(self.params.projectFolder)
self.runWS(address, self.params, mol, mol2, _complex)
else:
if not os.path.exists(self.params.projectFolder):
os.mkdir(self.params.projectFolder)
self.runWS(address, self.params, mol)
#start_new_thread( self.checkForRemoteResults, (self.webServiceResultsQueue,))
def runWS(self, address, params, mol1, mol2 = None, _complex = None):
    """Runs APBS Web Services.

    Submits an APBS job for mol1 (mol2 and _complex are supplied for
    binding-energy runs) to the service at *address*, updates the status
    labels of the 'default' form, and schedules checkForRemoteResults()
    to poll for completion.
    """
    # NOTE(review): both branches are identical; the has_key() test looks
    # vestigial — confirm before simplifying.
    if self.cmdForms.has_key('default'):
        self.apbsWS = APBSCmdToWebService(params, mol1,mol2, _complex)
        self.Parallel_flag = False
    else:
        self.apbsWS = APBSCmdToWebService(params, mol1, mol2, _complex)
        self.Parallel_flag = False
    try:
        f = self.cmdForms['default']
        f.descr.entryByName['APBSservicesLabel1']['widget'].\
            configure(text = 'Connecting to '+ address)
        f.descr.entryByName['APBSservicesLabel2']['widget'].\
            configure(text = "")
        f.descr.entryByName['APBSservicesLabel4']['widget'].\
            configure(text = "")
        f.descr.entryByName['APBSservicesLabel3']['widget'].\
            configure(text = "Please wait ...")
        f.descr.entryByName['APBSservicesLabel4']['widget'].\
            configure(text = "")
        self.vf.GUI.ROOT.update()
        # blocking submission; returns a response holding the job ID
        resp = self.apbsWS.run(address)
        f.descr.entryByName['APBSservicesLabel1']['widget'].\
            configure(text = "Received Job ID: " + resp._jobID)
        # start polling for results shortly after submission
        self.vf.GUI.ROOT.after(5, self.checkForRemoteResults)
        f.descr.entryByName['WS_Run']['widget'].configure(state = 'disabled')
        # f.descr.entryByName['APBSservicesLabel1']['widget'].\
        #     configure(text = 'Remote APBS calculation is done')
        self.rml = mol1.name  # remembered for naming the downloaded .dx files
    except Exception, inst:
        f.descr.entryByName['APBSservicesLabel3']['widget'].\
            configure(text = "")
        from ZSI import FaultException
        if isinstance(inst, FaultException):
            # SOAP fault: extract the server's <message> text and show it
            tmp_str = inst.fault.AsSOAP()
            tmp_str = tmp_str.split('<message>')
            tmp_str = tmp_str[1].split('</message>')
            if self.cmdForms.has_key('default') and \
               self.cmdForms['default'].f.winfo_toplevel().wm_state() == \
               'normal':
                tkMessageBox.showerror("ERROR: ",tmp_str[0],parent =
                                       self.cmdForms['default'].root)
            else:
                tkMessageBox.showerror("ERROR: ",tmp_str[0])
        else:
            # unexpected failure: dump a traceback and point the user at it
            import traceback
            traceback.print_stack()
            traceback.print_exc()
            f.descr.entryByName['APBSservicesLabel1']['widget'].\
                configure(text = "")
            f.descr.entryByName['APBSservicesLabel2']['widget'].\
                configure(text = "ERROR!!! Unable to complete the Run")
            f.descr.entryByName['APBSservicesLabel3']['widget'].\
                configure(text = "Please open Python Shell for Traceback")
def checkForRemoteResults(self):
    """Checks the queue for remote results until we get one.

    Polls the web-service job status; once done (status code 8) it opens
    the results URL and starts downloading the potential .dx file(s),
    otherwise re-schedules itself every 500 ms.
    """
    resp = self.apbsWS.appServicePort.queryStatus(queryStatusRequest(self.apbsWS.JobID))
    if resp._code == 8: # 8 = GramJob.STATUS_DONE
        f = self.cmdForms['default']
        f.descr.entryByName['APBSservicesLabel2']['widget'].\
            configure(text = resp._message)
        webbrowser.open(resp._baseURL)
        f.descr.entryByName['APBSservicesLabel3']['widget'].\
            configure(text = resp._baseURL,fg='Blue',cursor='hand1')
        def openurl(event):
            # make the results-URL label clickable
            webbrowser.open(resp._baseURL)
        f.descr.entryByName['APBSservicesLabel3']['widget'].\
            bind(sequence="<Button-1>",func = openurl)
        # read the potential back
        opener = urllib.FancyURLopener(cert_file = APBS_proxy, key_file = APBS_proxy)
        if self.Parallel_flag:
            if self.npx*self.npy*self.npz == 1:
                # parallel run on a single processor: one PE0 file to fetch
                f.descr.entryByName['APBSservicesLabel4']['widget'].\
                    configure(text = "Downloading %s.potential-PE0.dx"%self.rml)
                f.descr.entryByName['WS_ProgressBar']['widget'].\
                    grid(sticky='ew', row = 9, column = 0, columnspan = 2)
                f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
                    configure(text = "URI: "+resp._baseURL+"/%s.potential-PE0.dx"%self.rml)
                self.progressBar.configure(progressformat='precent',
                                           labeltext='Progress ... ', max =100)
                self.progressBar.set(0)
                self._dx = opener.open(resp._baseURL+"/%s.potential-PE0.dx"%self.rml)
                self._dx_out = open(os.path.join(self.params.projectFolder,
                                                 "%s.potential.dx"%self.rml),"w")
                bytes = int(self._dx.headers.dict['content-length'])
                self._progress_counter = 0
                # download in ~1% chunks so the progress bar can advance
                self._download_bytes = bytes/100
                if self._download_bytes == 0: self._download_bytes = 1
                self.Download()
            else:
                # multi-processor run: fetch one .dx per PE, then merge them
                f.descr.entryByName['APBSservicesLabel4']['widget'].\
                    configure(text = "Downloading %s.potential.dx. Please wait ..."%self.rml)
                f.descr.entryByName['WS_ProgressBar']['widget'].\
                    grid(sticky='ew', row = 9, column = 0, columnspan = 2)
                f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
                    configure(text = "URI: "+resp._baseURL+"/%s.potential-PE*.dx"%self.rml)
                self.progressBar.configure(progressformat='ratio',
                                           labeltext='Progress ... ', max =self.npx*self.npy*self.npz)
                self._progress_counter = 0
                self.progressBar.set(0)
                self._dx_files = []
                for i in range(self.npx*self.npy*self.npz):
                    self._dx_files.append(opener.open(resp._baseURL+
                                                      "/%s.potential-PE%d.dx"%(self.rml,i)))
                self._dx_out = open(os.path.join(self.params.projectFolder,
                                                 "%s.potential.dx"%self.rml),"w")
                self._dx_out.write("# Data from %s\n"%resp._baseURL)
                self._dx_out.write("#\n# POTENTIAL (kT/e)\n#\n")
                self.Download_and_Merge()
        else:
            # serial run: a single potential file
            f.descr.entryByName['APBSservicesLabel4']['widget'].\
                configure(text = "Downloading %s.potential.dx"%self.rml)
            f.descr.entryByName['WS_ProgressBar']['widget'].\
                grid(sticky='ew', row = 9, column = 0, columnspan = 2)
            f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
                configure(text = "URI: "+resp._baseURL + "/%s.potential.dx"%self.rml)
            self.progressBar.configure(progressformat='percent',
                                       labeltext='Progress ... ', max =100)
            self.progressBar.set(0)
            self._dx = opener.open(resp._baseURL + "/%s.potential.dx"%self.rml)
            filePath = os.path.join(self.params.projectFolder,"%s.potential.dx"%self.rml)
            try:
                self._dx_out = open(filePath,"w")
            except IOError:
                showerror("Download Failed!",
                          "Permission denied: " +filePath)
            bytes = int(self._dx.headers.dict['content-length'])
            self._progress_counter = 0
            self._download_bytes = bytes/100
            if self._download_bytes == 0: self._download_bytes = 1
            self.Download()
        return
    else:
        # job not finished: show status and poll again in 500 ms
        f = self.cmdForms['default']
        f.descr.entryByName['APBSservicesLabel2']['widget'].\
            configure(text = "Status: " + resp._message)
        self.vf.GUI.ROOT.after(500, self.checkForRemoteResults)
def Download(self):
    """Downloads one chunk of the potential .dx file per invocation.

    Re-schedules itself via Tk's after() every 10 ms so the GUI stays
    responsive; on EOF it closes both files, saves the profile and
    re-enables the Run button.
    """
    self._progress_counter += 1
    if self._progress_counter > 100:
        # content-length may under-estimate the chunk count; clamp at 100%
        self._progress_counter = 100
    self.progressBar.set(self._progress_counter)
    tmp = self._dx.read(self._download_bytes)
    if tmp:
        self._dx_out.write(tmp)
    else:
        # empty read == EOF: finalize the download
        self._dx.close()
        self._dx_out.close()
        f = self.cmdForms['default']
        f.descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
        f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
            configure(text = '')
        f.descr.entryByName['APBSservicesLabel4']['widget'].\
            configure(text="%s.potential.dx has been saved"%self.rml)
        self.saveProfile(self.params.name, fileFlag=True)
        self.changeMenuState('normal')
        f.descr.entryByName['WS_Run']['widget'].configure(state = 'normal')
        return
    self.vf.GUI.ROOT.after(10, self.Download)
def Download_and_Merge(self):
    """Downloads the per-processor .dx files and merges them into one grid.

    Reads the grid dimensions from the first file's header, writes a
    combined OpenDX header sized npx*npy*npz larger, loads each PE's data
    block into a Numeric array, concatenates the sub-grids along x, y and
    z, and writes the merged potential plus the .dx trailer.
    """
    # skip the 4 comment lines of the first file, then read the
    # 'object 1 class gridpositions counts nx ny nz' line
    self._dx_files[0].readline()
    self._dx_files[0].readline()
    self._dx_files[0].readline()
    self._dx_files[0].readline()
    tmp_str = self._dx_files[0].readline()
    from string import split
    w = split(tmp_str)
    nx, ny, nz = int(w[5]), int(w[6]), int(w[7])
    self._dx_out.write("object 1 class gridpositions counts %d %d %d\n"
                       %(nx*self.npx,ny*self.npy,nz*self.npz))
    # copy origin + the three delta lines verbatim from the first file
    self._dx_out.write(self._dx_files[0].readline())
    self._dx_out.write(self._dx_files[0].readline())
    self._dx_out.write(self._dx_files[0].readline())
    self._dx_out.write(self._dx_files[0].readline())
    self._dx_out.write("object 2 class gridconnections counts %d %d %d\n"
                       %(nx*self.npx,ny*self.npy,nz*self.npz))
    self._dx_out.write("object 3 class array type double rank 0 items %d"
                       %(nx*self.npx*ny*self.npy*nz*self.npz)+" data follows\n")
    # skip the 11-line header of every remaining file, and the last two
    # header lines still pending in the first one
    for file in self._dx_files[1:]:
        for i in range(11):
            file.readline()
    self._dx_files[0].readline()
    self._dx_files[0].readline()
    arrays = []
    for file in self._dx_files:
        self._progress_counter += 1
        self.progressBar.set(self._progress_counter)
        data = file.readlines()
        file.close()
        array = Numeric.zeros( (nx,ny,nz), Numeric.Float32)
        # the last 5 lines are the .dx trailer; everything before is data
        values = map(split, data[0:-5])
        ind=0
        size = nx*ny*nz
        for line in values:
            if ind>=size:
                break
            l = len(line)
            array.flat[ind:ind+l] = map(float, line)
            ind = ind + l
        arrays.append(array)
    self.progressBar.configure(labeltext='Merging ... ')
    # stitch the sub-grids back together: x within y within z
    for k in range(self.npz):
        for j in range(self.npy):
            for i in range(self.npx):
                if i == 0:
                    array_x = arrays[self.npx*j+
                                     self.npx*self.npy*k]
                else:
                    array_x = Numeric.concatenate(
                        (array_x,arrays[i+self.npx*j+
                                        self.npx*self.npy*k]),axis=0)
            if j == 0:
                array_y = array_x
            else:
                array_y = Numeric.concatenate(
                    (array_y,array_x),axis=1)
        if k == 0:
            array_out = array_y
        else:
            array_out = Numeric.concatenate(
                (array_out,array_y),axis=2)
    # emit the merged values, one y-row per output line
    for z in array_out:
        for y in z:
            for x in y:
                self._dx_out.write(str(x)+" ")
            self._dx_out.write('\n')
    self._dx_out.write("attribute \"dep\" string \"positions\"\n")
    self._dx_out.write("object \"regular positions regular connections\" class field\n")
    self._dx_out.write("component \"positions\" value 1\n")
    self._dx_out.write("component \"connections\" value 2\n")
    self._dx_out.write("component \"data\" value 3\n")
    self._dx_out.close()
    f = self.cmdForms['default']
    f.descr.entryByName['WS_ProgressBar']['widget'].grid_forget()
    f.descr.entryByName['APBS_WS_DX_Label']['widget'].\
        configure(text = '')
    f.descr.entryByName['APBSservicesLabel4']['widget'].\
        configure(text="%s.potential.dx has been saved"%self.rml)
    self.saveProfile(self.params.name, fileFlag=True)
    self.changeMenuState('normal')
    f.descr.entryByName['WS_Run']['widget'].configure(state = 'normal')
# Forms defined here
def buildFormDescr(self, formName):
"""Builds 'error','ionForm','outputFilesForm','moleculeSelect' and
'default' forms'"""
if formName == 'error':
if self.cmdForms.has_key('default') and \
self.cmdForms['default'].f.winfo_toplevel().wm_state() == \
'normal':
tkMessageBox.showerror("ERROR: ", self.errorMsg,parent =
self.cmdForms['default'].root)
else:
tkMessageBox.showerror("ERROR: ", self.errorMsg)
return
if formName == 'ionForm':
ifd = InputFormDescr(title = "Add Ion")
ifd.append({'name':'ionChargeLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Charge (e):'},
'gridcfg':{'row':0, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'ionCharge',
'wcfg':{'validate':{'validator':'real'}, 'value':1},
'gridcfg':{'row':0, 'column':1, 'sticky':'wens'}
})
ifd.append({'name':'ionConcentrationLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Concentration (M):'},
'gridcfg':{'row':1, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':ThumbWheel,
'name':'ionConcentration',
'wcfg':{'text':None, 'showLabel':1,
'min':0,
'value':0.01, 'oneTurn':0.1,
'type':'float',
'increment':0.01,
'wheelLabcfg1':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'grey'},
'wheelLabcfg2':{'font':
(ensureFontCase('times'), 15, 'bold'), 'fill':'black'},
'continuous':1,
'wheelPad':1, 'width':150,'height':14},
'gridcfg':{'row':1, 'column':1, 'sticky':'wens'}
})
ifd.append({'name':'ionRadiusLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Radius (Angstroms):'},
'gridcfg':{'row':2, 'column':0, 'sticky':'wens'}
})
ifd.append({'widgetType':Pmw.EntryField,
'name':'ionRadius',
'wcfg':{'validate':{'validator':'real','min':0}, 'value':1},
'gridcfg':{'row':2, 'column':1, 'sticky':'wens'}
})
return ifd
elif formName =='outputFilesForm':
ifd = InputFormDescr(title = "Select output files")
ifd.append({'name':'fileTypeLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'File Type'},
'gridcfg':{'sticky':'e', 'row':1, 'column':0}
})
ifd.append({'name':'fileFormatLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'File format'},
'gridcfg':{'sticky':'e', 'row':1, 'column':1}
})
ifd.append({'name':'chargeDistributionFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Charge distribution file: '},
'gridcfg':{'sticky':'e', 'row':2, 'column':0}
})
ifd.append({'name':'chargeDistributionFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'dropdown':1, 'history':0,},
'defaultValue':self.params.chargeDistributionFile,
'gridcfg':{'sticky':'wens', 'row':2, 'column':1}
})
ifd.append({'name':'potentialFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Potential file: '},
'gridcfg':{'sticky':'e', 'row':3, 'column':0}
})
ifd.append({'name':'potentialFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.potentialFile,
'gridcfg':{'sticky':'wens', 'row':3, 'column':1}
})
ifd.append({'name':'solventAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Solvent accessibility file: '},
'gridcfg':{'sticky':'e', 'row':4, 'column':0}
})
ifd.append({'name':'solventAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.solventAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':4, 'column':1}
})
ifd.append({'name':'splineBasedAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Spline-based accessibility file: '},
'gridcfg':{'sticky':'e', 'row':5, 'column':0}
})
ifd.append({'name':'splineBasedAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.splineBasedAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':5, 'column':1}
})
ifd.append({'name':'VDWAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'VDW accessibility file: '},
'gridcfg':{'sticky':'e', 'row':6, 'column':0}
})
ifd.append({'name':'VDWAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.VDWAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':6, 'column':1}
})
ifd.append({'name':'ionAccessibilityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion accessibility file: '},
'gridcfg':{'sticky':'e', 'row':7, 'column':0}
})
ifd.append({'name':'ionAccessibilityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionAccessibilityFile,
'gridcfg':{'sticky':'wens', 'row':7, 'column':1}
})
ifd.append({'name':'laplacianOfPotentialFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Laplacian of potential file: '},
'gridcfg':{'sticky':'e', 'row':8, 'column':0}
})
ifd.append({'name':'laplacianOfPotentialFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.laplacianOfPotentialFile,
'gridcfg':{'sticky':'wens', 'row':8, 'column':1}
})
ifd.append({'name':'energyDensityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Energy density file: '},
'gridcfg':{'sticky':'e', 'row':9, 'column':0}
})
ifd.append({'name':'energyDensityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.energyDensityFile,
'gridcfg':{'sticky':'wens', 'row':9, 'column':1}
})
ifd.append({'name':'ionNumberFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion number file: '},
'gridcfg':{'sticky':'e', 'row':10, 'column':0}
})
ifd.append({'name':'ionNumberFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionNumberFile,
'gridcfg':{'sticky':'wens', 'row':10, 'column':1}
})
ifd.append({'name':'ionChargeDensityFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Ion charge density file: '},
'gridcfg':{'sticky':'e', 'row':11, 'column':0}
})
ifd.append({'name':'ionChargeDensityFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.ionChargeDensityFile,
'gridcfg':{'sticky':'wens', 'row':11, 'column':1}
})
ifd.append({'name':'xShiftedDielectricFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'X-shifted dielectric file: '},
'gridcfg':{'sticky':'e', 'row':12, 'column':0}
})
ifd.append({'name':'xShiftedDielectricFile',
'widgetType':Pmw.ComboBox,
'wcfg':{'scrolledlist_items':self.params.FILETYPES,
'listheight':100, 'history':0, 'dropdown':1},
'defaultValue':self.params.xShiftedDielectricFile,
'gridcfg':{'sticky':'wens', 'row':12, 'column':1}
})
ifd.append({'name':'yShiftedDielectricFileLabel',
'widgetType':Tkinter.Label,
'wcfg':{'text':'Y-shifted dielectric file: '},
'gridcfg':{'sticky':'e', | |
back to r
# Inspired by the Adafruit examples.
#
def color_wheel(self, pos):
    """Map a wheel position 0-255 to an (r, g, b) rainbow colour.

    The colour transitions r -> g -> b -> r across three 85-step arcs.
    """
    pos = 255 - pos
    if pos < 85:
        return (255 - pos * 3, 0, pos * 3)
    if pos < 170:
        shifted = pos - 85
        return (0, shifted * 3, 255 - shifted * 3)
    shifted = pos - 170
    return (shifted * 3, 255 - shifted * 3, 0)
#
# Returns a new, random wheel index with a minimum distance of 42 from pos.
#
def get_random_wheel_index(self, pos):
    """Return a random wheel position at least 42 steps away from *pos*.

    Distance is measured around the 256-step wheel (wrap-around aware).
    """
    candidate = 0
    dist = 0
    while dist < 42:
        candidate = random.randrange(0, 256)
        linear = abs(pos - candidate)
        dist = min(linear, 255 - linear)
    return candidate
# overload setPixelColor() functions so we can use gamma correction
# (see https://learn.adafruit.com/led-tricks-gamma-correction/the-issue)
def setPixelColor(self, n, *argv):
    """Set pixel *n*, applying gamma correction when enabled on the segment.

    Accepts either a single (r, g, b[, w]) tuple or 3-4 separate channel
    values.  Raises TypeError for any other argument count.
    (see https://learn.adafruit.com/led-tricks-gamma-correction/the-issue)
    """
    # BUGFIX: the original guard used 'and' ("< 1 and > 4"), which can never
    # be True; valid call shapes are one colour tuple, or 3 (RGB) / 4 (RGBW)
    # channel values.
    if len(argv) not in (1, 3, 4):
        raise TypeError("Wrong number of arguments")
    r = g = b = w = 0
    if len(argv) == 1:
        r = argv[0][0]
        g = argv[0][1]
        b = argv[0][2]
        w = argv[0][3] if self.neo.bpp == 4 else 0
    else:
        if len(argv) == 4:
            w = argv[3]
        r = argv[0]
        g = argv[1]
        b = argv[2]
    if self.seg.is_gamma():
        # gamma8 is a module-level lookup table defined elsewhere in the file
        self.neo[n] = (gamma8[r], gamma8[g], gamma8[b], gamma8[w]) if self.neo.bpp == 4 else (gamma8[r], gamma8[g], gamma8[b])
    else:
        self.neo[n] = (r, g, b, w) if self.neo.bpp == 4 else (r, g, b)
#
# color blend function
#
def color_blend(self, color1, color2, blend):
    """Linearly blend *color1* toward *color2* by *blend* (0-255)."""
    if blend == 0:
        return color1
    if blend == 255:
        return color2
    has_white = self.neo.bpp == 4
    c1 = (color1[0], color1[1], color1[2], color1[3] if has_white else 0)
    c2 = (color2[0], color2[1], color2[2], color2[3] if has_white else 0)
    mixed = tuple(int((b * blend + a * (255 - blend)) / 256) for a, b in zip(c1, c2))
    return mixed if has_white else mixed[:3]
######################
# Modes #
######################
#
# No blinking. Just plain old static light.
#
def mode_static(self):
    """Plain static light: paint the whole segment with colour 0."""
    base_color = self.seg.colors[0]
    self.fill_seg(base_color)
    return self.seg.speed
# Blink/strobe function
# Alternate between color1 and color2
# if(strobe == true) then create a strobe effect
def blink(self, color1, color2, strobe):
    """Alternate the segment between *color1* and *color2*.

    With strobe=True the 'on' phase is a short 20 ms flash; otherwise the
    duty cycle is 50/50.  is_reverse() swaps which colour counts as 'on'.
    """
    odd_call = bool(self.seg_rt.counter_mode_call & 1)
    if odd_call:
        # "off" half of the cycle
        self.fill_seg(color1 if self.seg.is_reverse() else color2)
        self.seg_rt.set_cycle()
        return self.seg.speed - 20 if strobe else self.seg.speed / 2
    # "on" half of the cycle
    self.fill_seg(color2 if self.seg.is_reverse() else color1)
    return 20 if strobe else self.seg.speed / 2
#
# Normal blinking. 50% on/off time.
#
def mode_blink(self):
    """Normal blinking with a 50% on/off duty cycle."""
    on_color, off_color = self.seg.colors[0], self.seg.colors[1]
    return self.blink(on_color, off_color, False)
#
# Classic Blink effect. Cycling through the rainbow.
#
def mode_blink_rainbow(self):
    """Blink between a rainbow-cycling colour and colour 1."""
    wheel_pos = self.seg_rt.counter_mode_call & 0xFF
    return self.blink(self.color_wheel(wheel_pos), self.seg.colors[1], False)
#
# Classic Strobe effect.
#
def mode_strobe(self):
    """Classic strobe effect between colour 0 and colour 1."""
    flash, background = self.seg.colors[0], self.seg.colors[1]
    return self.blink(flash, background, True)
#
# Classic Strobe effect. Cycling through the rainbow.
#
def mode_strobe_rainbow(self):
    """Strobe with the flash colour cycling through the rainbow."""
    wheel_pos = self.seg_rt.counter_mode_call & 0xFF
    return self.blink(self.color_wheel(wheel_pos), self.seg.colors[1], True)
#
# Color wipe function
# LEDs are turned on (color1) in sequence, then turned off (color2) in sequence.
#* if (bool rev == true) then LEDs are turned off in reverse order
#
def color_wipe(self, color1, color2, rev):
    """One step of a colour wipe.

    First half of the cycle paints *color1* pixel by pixel across the
    segment; the second half erases with *color2*.  rev=True erases in
    the opposite direction.
    """
    step = self.seg_rt.counter_mode_step
    reverse = self.seg.is_reverse()
    if step < self.seg.len:
        # "on" sweep with color1
        offset = step
        pixel = self.seg.stop - offset if reverse else self.seg.start + offset
        self.setPixelColor(pixel, color1)
    else:
        # "off" sweep with color2; XOR of reverse/rev picks the direction
        offset = step - self.seg.len
        if bool(reverse) != bool(rev):
            self.setPixelColor(self.seg.stop - offset, color2)
        else:
            self.setPixelColor(self.seg.start + offset, color2)
    self.seg_rt.counter_mode_step = (step + 1) % (self.seg.len * 2)
    if self.seg_rt.counter_mode_step == 0:
        self.seg_rt.set_cycle()
    return self.seg.speed / (self.seg.len * 2)
#
# Lights all LEDs one after another.
#
def mode_color_wipe(self):
    """Lights all LEDs one after another (colour 0 on, colour 1 off)."""
    on_color, off_color = self.seg.colors[0], self.seg.colors[1]
    return self.color_wipe(on_color, off_color, False)
def mode_color_wipe_inv(self):
    """Colour wipe with the on/off colours swapped."""
    on_color, off_color = self.seg.colors[1], self.seg.colors[0]
    return self.color_wipe(on_color, off_color, False)
def mode_color_wipe_rev(self):
    """Colour wipe that turns pixels off in reverse order."""
    on_color, off_color = self.seg.colors[0], self.seg.colors[1]
    return self.color_wipe(on_color, off_color, True)
def mode_color_wipe_rev_inv(self):
    """Reverse colour wipe with the on/off colours swapped."""
    on_color, off_color = self.seg.colors[1], self.seg.colors[0]
    return self.color_wipe(on_color, off_color, True)
#
# Turns all LEDs after each other to a random color.
# Then starts over with another color.
#
def mode_color_wipe_random(self):
    """Wipe the strip in a random colour, picking a new colour each pass.

    aux_param caches the current random wheel index between calls.
    """
    at_pass_start = self.seg_rt.counter_mode_step % self.seg.len == 0
    if at_pass_start:
        self.seg_rt.aux_param = self.get_random_wheel_index(self.seg_rt.aux_param)
    color = self.color_wheel(self.seg_rt.aux_param)
    return 2 * self.color_wipe(color, color, False)
#
# Random color introduced alternating from start and end of strip.
#
def mode_color_sweep_random(self):
    """Sweep a random colour in alternately from both ends of the strip.

    aux_param caches the current random wheel index between calls.
    """
    at_pass_start = self.seg_rt.counter_mode_step % self.seg.len == 0
    if at_pass_start:
        self.seg_rt.aux_param = self.get_random_wheel_index(self.seg_rt.aux_param)
    color = self.color_wheel(self.seg_rt.aux_param)
    return 2 * self.color_wipe(color, color, True)
#
# Lights all LEDs in one random color up. Then switches them
# to the next random color.
#
def mode_random_color(self):
    """Light the whole segment in one random colour per cycle."""
    # aux_param keeps the wheel index so the next colour differs enough
    self.seg_rt.aux_param = self.get_random_wheel_index(self.seg_rt.aux_param)
    chosen = self.color_wheel(self.seg_rt.aux_param)
    self.fill(chosen, self.seg.start, self.seg.len)
    self.seg_rt.set_cycle()
    return self.seg.speed
#
# Lights every LED in a random color. Changes one random LED after the other
# to another random color.
#
def mode_single_dynamic(self):
    """Random colour per LED; each call recolours one random LED."""
    if self.seg_rt.counter_mode_call == 0:
        # first call: seed every pixel with its own random colour
        for pixel in range(self.seg.start, self.seg.stop + 1):
            self.setPixelColor(pixel, self.color_wheel(random.randrange(0, 256)))
    target = self.seg.start + random.randrange(self.seg.len)
    self.setPixelColor(target, self.color_wheel(random.randrange(0, 256)))
    self.seg_rt.set_cycle()
    return self.seg.speed
#
# Lights every LED in a random color. Changes all LED at the same time
# to new random colors.
#
def mode_multi_dynamic(self):
    """Give every LED a new random colour at once."""
    for pixel in range(self.seg.start, self.seg.stop + 1):
        self.setPixelColor(pixel, self.color_wheel(random.randrange(0, 256)))
    self.seg_rt.set_cycle()
    return self.seg.speed
#
# Does the "standby-breathing" of well known i-Devices. Fixed Speed.
# Use mode "fade" if you like to have something similar with a different speed.
#
def mode_breath(self):
    """i-Device style "standby breathing" at fixed speed.

    counter_mode_step sweeps 15 -> 255 -> 15 (mirrored above 255); the
    delay ladder slows the fade near the dark end of the breath.
    """
    lum = self.seg_rt.counter_mode_step
    if lum > 255:
        lum = 511 - lum  # mirror the second half of the sweep
    if lum == 15:
        delay = 970  # long pause before each breath
    else:
        # (upper bound, delay) pairs; the fade speeds up as lum rises
        for bound, step_delay in ((25, 38), (50, 36), (75, 28),
                                  (100, 20), (125, 14), (150, 11)):
            if lum <= bound:
                delay = step_delay
                break
        else:
            delay = 10
    color = self.color_blend(self.seg.colors[1], self.seg.colors[0], lum)
    self.fill(color, self.seg.start, self.seg.len)
    self.seg_rt.counter_mode_step += 2
    if self.seg_rt.counter_mode_step > (512 - 15):
        self.seg_rt.counter_mode_step = 15
        self.seg_rt.set_cycle()
    return delay
#
# Fades the LEDs between two colors
#
def mode_fade(self):
    """Fade the whole segment between colour 1 and colour 0."""
    lum = self.seg_rt.counter_mode_step
    if lum > 255:
        lum = 511 - lum  # mirror: 0 -> 255 -> 0
    blended = self.color_blend(self.seg.colors[1], self.seg.colors[0], lum)
    self.fill(blended, self.seg.start, self.seg.len)
    self.seg_rt.counter_mode_step += 4
    if self.seg_rt.counter_mode_step > 511:
        self.seg_rt.counter_mode_step = 0
        self.seg_rt.set_cycle()
    return self.seg.speed / 128
#
# scan function - runs a block of pixels back and forth.
#
def scan(self, color1, color2, dual):
    """Run a block of pixels back and forth across the segment.

    aux_param stores the direction flag (truthy = moving backwards).
    dual=True mirrors the block from both ends simultaneously.
    """
    step = -1 if self.seg_rt.aux_param else 1
    block = 1 << self.seg.size_option()
    self.fill(color2, self.seg.start, self.seg.len)
    pos = self.seg_rt.counter_mode_step
    for offset in range(block):
        if self.seg.is_reverse() or dual:
            self.setPixelColor(self.seg.stop - pos - offset, color1)
        if not self.seg.is_reverse() or dual:
            self.setPixelColor(self.seg.start + pos + offset, color1)
    self.seg_rt.counter_mode_step = pos + step
    if self.seg_rt.counter_mode_step == 0:
        # reached the start again: reset direction and close the cycle
        self.seg_rt.aux_param = 0
        self.seg_rt.set_cycle()
    if self.seg_rt.counter_mode_step >= (self.seg.len - block):
        self.seg_rt.aux_param = 1
    return int(self.seg.speed / (self.seg.len * 2))
#
# Runs a block of pixels back and forth.
#
def mode_scan(self):
    """Runs a block of pixels back and forth."""
    fg, bg = self.seg.colors[0], self.seg.colors[1]
    return self.scan(fg, bg, False)
#
# Runs two blocks of pixels back and forth in opposite directions.
#
def mode_dual_scan(self):
    """Runs two pixel blocks back and forth in opposite directions."""
    fg, bg = self.seg.colors[0], self.seg.colors[1]
    return self.scan(fg, bg, True)
#
# Cycles all LEDs at once through a rainbow.
#
def mode_rainbow(self):
    """Cycle all LEDs at once through the rainbow."""
    current = self.seg_rt.counter_mode_step
    self.fill(self.color_wheel(current), self.seg.start, self.seg.len)
    self.seg_rt.counter_mode_step = (current + 1) & 0xFF
    if self.seg_rt.counter_mode_step == 0:
        self.seg_rt.set_cycle()
    return int(self.seg.speed / 256)
#
# Cycles a rainbow over the entire string of LEDs.
#
def mode_rainbow_cycle(self):
    """Spread a full rainbow over the segment and rotate it each call."""
    base = self.seg_rt.counter_mode_step
    for idx in range(self.seg.len):
        hue = ((idx * 256 // self.seg.len) + base) & 0xFF
        self.setPixelColor(self.seg.start + idx, self.color_wheel(hue))
    self.seg_rt.counter_mode_step = (base + 1) & 0xFF
    if self.seg_rt.counter_mode_step == 0:
        self.seg_rt.set_cycle()
    return int(self.seg.speed / 256)
#
# Tricolor chase function
#
def tricolor_chase(self, color1, color2, color3):
sizeCnt = 1 << self.seg.size_option()
sizeCnt2 = sizeCnt + sizeCnt
sizeCnt3 = sizeCnt2 + sizeCnt
index = self.seg_rt.counter_mode_step % sizeCnt3
for i in range(0, self.seg.len):
index = index % sizeCnt3
index += 1
color | |
##--------------------Coding References---------------------##
# Percentage of borrowed code: 5% - Parts of the MC-dropout and data preprocessing normalisation step are borrowed,
# The UNET model is adapted from my own previous work on 3D UNETs. It was modified to include 2D convolution layers and dropout layers.
# [1] <NAME> (2020) Measuring uncertainty using MC Dropout on pytorch, Available at:
# https://stackoverflow.com/questions/63285197/measuring-uncertainty-using-mc-dropout-on-pytorch (Accessed: 21st July 2021).
# [2] <NAME> (2020) Brain-tumour-segmentation-master, Available at:
# https://github.com/ER-ALOK/brain-tumor-segmentation-master/blob/master/extract_patches.py (Accessed: 10th July 2021).
# [3] <NAME> (2021) INM705_Coursework, Available at:
# https://github.com/Assassinsarms/INM705_Coursework/blob/master/UNET3D.py (Accessed: 20th March 2021).
from glob import glob
import os
import errno
import sys
import time
from collections import OrderedDict
import imageio
import logging
import numpy as np
from skimage.io import imread, imsave
from hausdorff import hausdorff_distance
from sklearn.model_selection import train_test_split
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms.functional as tf
import torch.utils.data as data
from torch.cuda import amp
import torch.backends.cudnn as cudnn
from tqdm import tqdm
import pickle
import pandas as pd
from train_val import train, validate
from loss import BCEDiceLoss
from metrics import *
from utils import *
from model import *
from dataset import *
def enable_dropout(model):
    """Switch every Dropout layer of *model* into training mode.

    Used for Monte-Carlo dropout: the rest of the network stays in eval
    mode while the dropout layers keep sampling at test time.
    """
    dropout_layers = (m for m in model.modules()
                      if type(m).__name__.startswith('Dropout'))
    for layer in dropout_layers:
        layer.train()
def mc_samples(data_loader, forward_passes, iteration, nb_active_learning_iter_size, model, n_classes, n_samples, device, height, width):
    """
    a pool of unlabelled images is fed into the trained U-Net and a measure of uncertainty is computed for each unlabelled sample.
    entropy is used as a measure of uncertainty and is taken across different predictions.
    :param data_loader:
        unlabelled dataset
    :param forward_passes:
        number of monte-carlo samples/forward passes
    :param iteration:
        current active learning iteration
    :param nb_active_learning_iter_size:
        number of samples to be selected for annotation
    :param model:
        unet
    :param n_classes:
        number of classes (edema, non ET, ET)
    :param n_samples:
        number of samples in the unlabelled set
    :param device:
        GPU device
    :param height:
        height of MRI slices
    :param width:
        width of MRI slices
    :return avg_pred:
        array of average monte-carlo predictions for each forward pass
    :return uncertainty_maps:
        uncertainty maps for each average monte-carlo prediction
    """
    # the unlabelled pool shrinks by nb_active_learning_iter_size each iteration
    dropout_predictions = np.empty((0, n_samples-(iteration*nb_active_learning_iter_size), n_classes, height, width), dtype=np.float32)
    for i in tqdm(range(forward_passes)): # perform x forward passes to obtain x monte-carlo predictions
        predictions_forward_pass = np.empty((0, n_classes, height, width))
        model.eval()
        # keep dropout stochastic while the rest of the network stays frozen
        enable_dropout(model)
        with torch.no_grad():
            for batch_idx, (data, labels) in enumerate(data_loader):
                data = data.to(device)
                preds = model(data)
                preds = torch.sigmoid(preds).data.cpu().numpy()
                predictions_forward_pass = np.vstack((predictions_forward_pass, preds))
        dropout_predictions = np.vstack((dropout_predictions, predictions_forward_pass[np.newaxis, :, :, :, :]))
    # average over the forward-pass axis to get the MC-mean prediction
    avg_pred = np.mean(dropout_predictions, axis=0)
    # epsilon guards log(0) for pixels predicted exactly 0
    epsilon = sys.float_info.min
    uncertainty_maps = -np.sum(avg_pred*np.log(avg_pred + epsilon), axis=1) # entropy-based uncertainty measure
    # drop references to the large intermediates before returning
    dropout_predictions = 0
    predictions_forward_pass = 0
    return avg_pred, uncertainty_maps
def uncertainty_scoring(uncertainty_maps):
    """
    uncertainty maps are given an uncertainty score
    :param uncertainty_maps:
        array of per-image uncertainty maps, shape (n_images, ...)
    :return uncertainty_scores:
        numpy array with an avg uncertainty score for each image in the unlabelled set
    :return overall_uncertainty_score:
        overall uncertainty score (sum of all values) for the unlabelled set
    """
    n_images = uncertainty_maps.shape[0]
    # vectorised replacement for the per-image Python loop; astype keeps the
    # original float64 result dtype (np.empty default) for float32 inputs
    uncertainty_scores = uncertainty_maps.reshape(n_images, -1).mean(axis=1).astype(np.float64)
    overall_uncertainty_score = np.sum(uncertainty_maps)
    return uncertainty_scores, overall_uncertainty_score
def add_annotated_sample(to_be_annotated_index, labelled_img_paths, labelled_mask_paths, unlabelled_img_paths, unlabelled_mask_paths):
    """
    the labelled dataset is amended to include the samples to be annotated and the unlabelled dataset is amended to remove the samples to be annotated.
    :param to_be_annotated_index:
        index of samples selected for annotation
    :param labelled_img_paths:
        path of labelled MRI slices
    :param labelled_mask_paths:
        path of labels
    :param unlabelled_img_paths:
        path of unlabelled MRI slices
    :param unlabelled_mask_paths:
        path of labels for the unlabelled dataset (to be used during annotation)
    :return new_labelled_img_paths, new_labelled_mask_paths, new_unlabelled_img_paths, new_unlabelled_mask_paths:
        return updated paths for labelled dataset and unlabelled dataset after samples to be annotated are removed from the unlabelled dataset
    """
    samples_to_be_annotated = [unlabelled_img_paths[i] for i in to_be_annotated_index]
    annotation = [unlabelled_mask_paths[i] for i in to_be_annotated_index]
    new_labelled_img_paths = labelled_img_paths + samples_to_be_annotated
    new_labelled_mask_paths = labelled_mask_paths + annotation
    # pure-Python filtering instead of np.delete: avoids converting the path
    # lists to numpy string arrays and back (elements stay plain str), and
    # the set makes the membership test O(1)
    selected = set(to_be_annotated_index)
    new_unlabelled_img_paths = [p for i, p in enumerate(unlabelled_img_paths) if i not in selected]
    new_unlabelled_mask_paths = [p for i, p in enumerate(unlabelled_mask_paths) if i not in selected]
    return new_labelled_img_paths, new_labelled_mask_paths, new_unlabelled_img_paths, new_unlabelled_mask_paths
def main():
"""
Instructions:
1. Change 'random_bool' to True or False depending on if you wish to run random learning or active learning iteration
2. Configure 'nb_experiments', 'nb_active_learning_iter', nb_active_learning_iter_size', 'FORWARD_PASSES' parameters as desired
Note: computing MC predictions can take long depending on how many stochastic forward passes are used. Refer to 'active_learning_output.txt' to see
training times
"""
# load the training set paths, unlabelled set paths and test set paths
# these are lists containing paths to the slices selected for each dataset as computed in the 'data_split.ipynb' file.
# all MRI slices can be found in the 'all_data' folder
TRAIN_IMG_DIR = pickle.load(open('data\\train_val\\img\\'+'train_val.data', 'rb'))
TRAIN_LABEL_DIR = pickle.load(open('data\\train_val\\label\\'+'train_val.mask', 'rb'))
UNLABELLED_IMG_DIR = pickle.load(open('data\\unlabelled\\img\\'+'unlabelled.data', 'rb'))
UNLABELLED_LABEL_DIR = pickle.load(open('data\\unlabelled\\label\\'+'unlabelled.mask', 'rb'))
TEST_IMG_DIR = pickle.load(open('data\\test\\img\\'+'test.data', 'rb'))
TEST_LABEL_DIR = pickle.load(open('data\\test\\label\\'+'test.mask', 'rb'))
# split data into train/val (labelled, 20%), unlabelled (active learning simulation, 60%), test (20%) - using 5000 slices
random_bool = False # controls whether the samples chosen for labelling is entered random - normal training or active - active learning
height, width = 160, 160
n_initial_unlabelled_samples = len(UNLABELLED_IMG_DIR)
n_classes = 3 # edema, non-enhancing tumour, enhancing tumour
nb_experiments = 1 # number of experiments
nb_active_learning_iter = 5 # number of active learning iterations e.g. 15
nb_active_learning_iter_size = 30 # number of samples to be added to the training set after each active learning iteration - number of labels requested from oracle e.g. 30
FORWARD_PASSES = 2 # number of monte carlo predictions are used to calculate uncertainty e.g. 15
EPOCHS = 200 # early stopping epoch criteria for retraining during active or random learning
LEARNING_RATE = 1e-3
EARLY_STOP = 25
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu' # GPU support
BATCH_SIZE = 32
if random_bool == True:
learning_type = 'random'
else:
learning_type = 'active'
logging.basicConfig(level=logging.INFO, # instantiate a logger
format='%(asctime)s - %(levelname)s - %(message)s',
filename= learning_type + '_learning_results/'+ learning_type + '_learning_output.txt',
filemode='w')
console = logging.StreamHandler() # define a new Handler to log to console as well
console.setLevel(logging.INFO) # set the logging level
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') # set a format which is the same for console use
console.setFormatter(formatter) # tell the handler to use this format
logging.getLogger('').addHandler(console) # add the handler to the root logger
# model, optimiser
model = UNet2D(in_channels=4, out_channels=3).to(DEVICE)
logging.info("=> Creating 2D UNET Model")
loss_fn = BCEDiceLoss()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=LEARNING_RATE)
# initisialise results trackers
mean_WT_dice = np.zeros((nb_experiments, nb_active_learning_iter))
mean_TC_dice = np.zeros((nb_experiments, nb_active_learning_iter))
mean_ET_dice = np.zeros((nb_experiments, nb_active_learning_iter))
mean_WT_precision = np.zeros((nb_experiments, nb_active_learning_iter))
mean_TC_precision = np.zeros((nb_experiments, nb_active_learning_iter))
mean_ET_precision = np.zeros((nb_experiments, nb_active_learning_iter))
mean_WT_recall = np.zeros((nb_experiments, nb_active_learning_iter))
mean_TC_recall = np.zeros((nb_experiments, nb_active_learning_iter))
mean_ET_recall = np.zeros((nb_experiments, nb_active_learning_iter))
mean_WT_Hausdorff = np.zeros((nb_experiments, nb_active_learning_iter))
mean_TC_Hausdorff = np.zeros((nb_experiments, nb_active_learning_iter))
mean_ET_Hausdorff = np.zeros((nb_experiments, nb_active_learning_iter))
overall_start = time.time()
# iterating over the number of experiments
for r in range(nb_experiments):
logging.info("\n*****************EXPERIMENT " + str(r+1) + " IS STARTING********************")
# initialise paths with original unlabelled and labelled datasets
labelled_img_paths, labelled_mask_paths = TRAIN_IMG_DIR, TRAIN_LABEL_DIR
unlabelled_img_paths, unlabelled_mask_paths = UNLABELLED_IMG_DIR, UNLABELLED_LABEL_DIR
test_img_paths, test_mask_paths = TEST_IMG_DIR, TEST_LABEL_DIR
# keep the validation set the same for each experiment, hence each active learning iteration
labelled_img_paths, val_img_paths, labelled_mask_paths, val_mask_paths = train_test_split(labelled_img_paths,
labelled_mask_paths, test_size=0.2, random_state=41)
# iterating over the number of active learning loops
for i in range(nb_active_learning_iter):
# initialise directories for model weights storage after every active learning iteration - for debugging purposes
if random_bool == True:
model_path = 'models/random_trained/2DUNET_experiment_' + str(r+1) + '_iter_' + str(i) + '.pth'
results_path = 'random_learning_results'
data_type = 'random'
else:
model_path = 'models/active_trained/2DUNET_experiment_' + str(r+1) + '_iter_' + str(i) + '.pth'
results_path = 'active_learning_results'
data_type = 'uncertain'
if i == 0:
# if the active learning iteration is 0, load weights from base training set
model.load_state_dict(torch.load('models/base_trained/2DUNET.pth'))
path = results_path + "/experiment_" + str(r+1) + "/iteration_" + str(i)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
logging.info("\n---------STARTING AL ITERATION NUMBER " + str(i) + "----------")
# load unlabelled dataset
unlabelled_dataset = Dataset(unlabelled_img_paths, unlabelled_mask_paths)
unlabelled_loader = torch.utils.data.DataLoader(
unlabelled_dataset,
batch_size=BATCH_SIZE,
shuffle=False,
pin_memory=True,
drop_last=False)
# select samples to be annotated by an oracle, either randomly or based on an uncertainty measure (MC-dropout + entropy)
if random_bool == True:
to_be_added_random | |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '<EMAIL> (<NAME>)'
from datetime import datetime
from datetime import time
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
# from settings import ANDROID_CLIENT_ID
# from settings import IOS_CLIENT_ID
# from settings import ANDROID_AUDIENCE
from utils import getUserId
# OAuth scope / client-id shortcuts re-exported from the endpoints library
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# memcache keys for the cached announcement and featured-speaker messages
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
SPEAKER_TPL = ('See %s speaking at %s.')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default field values applied when a Conference is created without them
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
# default field values for newly created Sessions
SESSION_DEFAULTS = {
    "duration": 60,
    "typeOfSession": "Default type"
}
# query-filter operator names -> Datastore comparison operators
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
# query-filter field names -> Conference model property names
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# ResourceContainers pair a request body message with URL path/query params
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage, websafeConferenceKey=messages.StringField(1),)
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm, websafeConferenceKey=messages.StringField(1),)
SESSION_CREATE = endpoints.ResourceContainer(
    SessionForm, websafeConferenceKey=messages.StringField(1),)
SESSIONS_GET = endpoints.ResourceContainer(
    SessionForms, websafeConferenceKey=messages.StringField(1),)
SESSIONS_GETBYTYPE = endpoints.ResourceContainer(
    SessionForms, websafeConferenceKey=messages.StringField(1),
    type=messages.StringField(2),)
SESSIONS_GETBYSPEAKER = endpoints.ResourceContainer(
    SessionForms, speaker=messages.StringField(1),)
SESSION_WISHLIST = endpoints.ResourceContainer(
    SessionForms, websafeSessionKey=messages.StringField(1),)
CONF_GET_CITY = endpoints.ResourceContainer(
    message_types.VoidMessage, city=messages.StringField(1),)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Raises UnauthorizedException when no user is signed in and
        BadRequestException when the required 'name' field is missing.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # outbound-only fields are not stored on the entity
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm;
        # the confirmation email is sent asynchronously via the task queue
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email')
        return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference.

        The authenticated user becomes the organizer; returns the
        (default-filled) request form from _createConferenceObject.
        """
        return self._createConferenceObject(request)
    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info.

        Owner-only; delegates to the transactional _updateConferenceObject.
        """
        return self._updateConferenceObject(request)
    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
            path='conference/{websafeConferenceKey}',
            http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # the conference's parent entity is the organizer's Profile
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
            path='getConferencesCreated',
            http_method='GET', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # create ancestor query for all key matches for this user
        # (conferences are stored as children of the organizer's Profile)
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
        )
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
    @endpoints.method(ConferenceQueryForms, ConferenceForms,
            path='queryConferences',
            http_method='POST',
            name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences."""
        conferences = self._getQuery(request)
        # need to fetch organiser displayName from profiles
        # get all keys and use get_multi for speed
        organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        # NOTE(review): assumes every organiser Profile exists; a missing
        # profile would return None here and raise on `.key` -- confirm
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return individual ConferenceForm object per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in
                conferences])
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm.

        teeShirtSize is stored on the entity as the enum member name and is
        converted back to a TeeShirtSize enum value for the outbound form.
        """
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                if field.name == 'teeShirtSize':
                    setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf
    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if non-existent.

        Raises UnauthorizedException when no user is signed in.
        """
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # get Profile from datastore (the user id doubles as the entity key)
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        # create new Profile if not there
        if not profile:
            profile = Profile(
                key=p_key,
                displayName=user.nickname(),
                mainEmail=user.email(),
                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()
        return profile      # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and | |
<filename>cyanophage-light-dark-transcriptomics/code/RNAseq_parser_gff_directional.py
#!/usr/bin/env python
'''
<NAME>, Dec 2010 - Oct 2011, Feb 2012
Usage: RNAseq_parser_gff_directional.py -i <input SAM-formatted alignment file> -o <output> -d <dir with gff files> -p <tells it to print out intergenics>
IMPORTANT: Script requires a tab-delimited file of annotated genome regions for each reference genome with reads mapped, name of <chromosome_name>.gff
->This is currently set up to use the GFF3 annotation file format
I know that this code is awful and clunky and could be better written... but at least it works for now
If mapping RNAseq reads against multiple reference genomes (ie for a phage and its host) both chromosomes should have been in one multi-fasta file for the bwa alignment reference
DEPENDENCIES:
-needs samtools installed in path
-requires pysam module
Goal: count number of transcripts hitting a defined genomic region, and figure out how many of those are sense vs antisense (given orientation expected of dUTP second strand protocol)
This script takes a SAM alignment file (generated by BWA, etc), then utilizes the samtools package to convert it into an indexed binary SAM file which is then used to count the number
of reads aligning to a given region using the fetch() method from pysam
Still need to fix:
-should probably update old-style OptionParser references to use the newer argparse calls... but this works fine for now - esp if Python is not V 2.7
NOTES ABOUT COUNTING APPROACH:
If a read (paired or unpaired) spans two ORFs, then it will be counted once in each ORF
For paired end reads.... if both ends map within one ORF region, then that fragment is only counted once. If one end is in one ORF and the other end is at least partially in another ORF, then it counts once in each.
'''
import sys, re, pysam, os
from optparse import OptionParser
from operator import itemgetter
#Define columns in genome file where relevant information are located (first column=0)
#(these match the standard GFF3 column layout)
strand_column = 6 #Strand, + or -
begin_column = 3 #Gene start
end_column = 4 #Gene end
type_column = 2 #Feature type (CDS, rRNA, tRNA, ...)
info_column = 8 #Semicolon-separated attribute string
#GFF feature types counted as genes
feature_types=["CDS","rRNA","tRNA"]
#directory holding the <chromosome>.gff files (overridden by -d option)
dirname="./"
#NOTE(review): misspelled flag; the -p option is later stored in
#'printIntergenics', so this module-level name is never updated -- confirm
printIntergics=False
printedHeader=False
#tag to indicate header line to ignore in genome file
headertag="#"
#initialize stuff
#per-region read counters, keyed by region tuple
reads_in_region = {}
sense_in_region = {}
antisense_in_region = {}
#per-chromosome lists of regions and region lengths
allregionlists={}
alllengthlists={}
totalreads = 0
totalreads_rna = 0
def check_files(samname,reference_length_list):
    #This function verifies that all genome table files are present, and that the binary indexed BAM files exist (or will generate them if needed)
    #Returns the name of the sorted bam file to be used for read counting
    #NOTE(review): samtools is invoked through os.system with unquoted file
    #names; paths with spaces or shell metacharacters will break (shell
    #injection risk) -- only run on trusted file names
    #check genome table files
    for item in reference_length_list:
        chromosome_name=item[0]
        genome_file_name=dirname+"/"+chromosome_name+".gff"
        # print "looking for "+genome_file_name
        if not os.path.isfile(genome_file_name):
            err_msg= "Missing genome table file for chromosome %s, should be named %s" % (chromosome_name,genome_file_name)
            sys.exit(err_msg)
    #To do this searching efficiently, we need to use a sorted, indexed file in BAM format... check if it's there already, if not, call samtools to make them
    #depends on this naming scheme: for file 'aln', would be aln.sam -> aln.bam -> aln-sorted.bam
    basename = samname[0:-4]
    bamname = basename+".bam"
    if not os.path.isfile(bamname):
        print "Converting SAM file to BAM format"
        callstring = "samtools view -bS -o %s %s" % (bamname,samname)
        os.system(callstring)
    sortedname = basename+"-sorted.bam"
    if not os.path.isfile(basename+"-sorted.bam"):
        print "Sorting and indexing BAM file"
        os.system("samtools sort %s %s" % (bamname,basename+"-sorted"))
        os.system("samtools index %s" % (basename+"-sorted.bam"))
    return basename+"-sorted.bam"
def read_genome_table(chromosome_name):
    #Read in ORF/feature definitions from a tab-delimited GFF3 file; name must be <chromosome_name>.gff
    #Returns two lists, one of all orfs, one of the intergenic regions in this genome
    #NOTE(review): the trailing-intergenic check near the end reads the
    #module-level global 'reference_length' (set in the __main__ loop), not
    #a parameter -- fragile if called outside that loop
    filename=dirname+"/"+chromosome_name+".gff"
    file_handle=open(filename,"rU")
    allorfs=list()
    for line in file_handle:
        #skip a header line; stop entirely at an embedded FASTA section
        if (line.strip()=='##FASTA'):
            break
        elif (line[0]==headertag):
            continue
        else:
            #print line
            parts = line.split("\t")
            if parts[type_column] in feature_types:
                infostr=parts[info_column].strip()
                infoparts = infostr.split(";")
                #locate the 'Name=' attribute (falls back to the first attribute)
                nameindex=0
                index=0
                for item in infoparts:
                    if "Name=" in item:
                        nameindex=index
                    index+=1
                orfinfo = (infoparts[0],parts[strand_column],int(parts[begin_column]),int(parts[end_column]),infoparts[nameindex].strip())
                allorfs.append(orfinfo)
    #Make sure that the ORFs are in linear order, helps with defining intergenic regions in the next step
    #Intergenic regions are treated differently for RPKMO calculation
    allorfs = sorted(allorfs, key=itemgetter(2))
    intergenic_regions=list()
    #define intergenic regions and make a list of them
    last_end = 0
    final_end = 0
    #Iterate through the defined ORF regions and generate locations between these
    for item in allorfs:
        start=item[2]
        end=item[3]
        if(start-last_end > 1):
            #intergenic region exists
            thisinfo = ("intergenic_"+str(last_end+1)+"_"+str(start-1),"+",last_end+1,start-1)
            intergenic_regions.append(thisinfo)
        last_end=end
        if(end>final_end):
            final_end=end
    #check on trailing intergenic between last feature and end of reference sequence - at some point this needs to be merged with any
    #intergenic space starting at position 1 (not sure if samtools understands circular chromosomes)
    if(reference_length > final_end):
        thisinfo = ("intergenic_"+str(final_end+1)+"_"+str(reference_length),"+",final_end+1,reference_length)
        intergenic_regions.append(thisinfo)
    file_handle.close()
    return allorfs, intergenic_regions
def count_reads_in_region(chromosome_name,first, last):
    #Use the .fetch() method from the pysam module to get a list of all the reads mapping to a given region of a chromosome, then
    #return (total, plus, minus) fragment counts for that region
    #NOTE(review): relies on the module-level 'samfile' pysam.Samfile opened in __main__
    #sets are much faster than lists
    #keep track of read names used within the region... if both ends are within this region, don't double-count it
    #this resets with each region, so it does not preclude counting a mate that maps within a different operon, intentionally
    processed_reads=set()
    total=0
    plus=0
    minus=0
    for alignedread in samfile.fetch(chromosome_name, first, last):
        thisname=alignedread.qname
        if(thisname not in processed_reads):
            total+=1
            processed_reads.add(thisname)
            if(alignedread.is_paired):
                #if paired, read2's orientation is opposite the fragment's,
                #so its strand assignment is inverted relative to read1
                if(alignedread.is_read1):
                    if(alignedread.is_reverse):
                        minus+=1
                    else:
                        plus+=1
                else:
                    if(alignedread.is_reverse):
                        plus+=1
                    else:
                        minus+=1
            else:
                #unpaired read
                # if(alignedread.is_read2):
                #     print "unpaired, read2"
                # else:
                #     print "unpaired, read1"
                if(alignedread.is_reverse):
                    minus+=1
                else:
                    plus+=1
    return total,plus,minus
def collect_data(chromosome_name,reference_length,orflist,intergeniclist):
    #Main function for going through a list of region identifiers, counting the number of reads mapped, and outputting this to a file
    #Fills the module-level reads/sense/antisense dictionaries and returns
    #(sorted list of all regions, {region: length}) for this chromosome
    length={}
    global totalreads
    global totalreads_rna
    #Get total number of reads that mapped to this chromosome for RPKM calculation
    myreads,x,y = count_reads_in_region(chromosome_name,1,reference_length)
    myreads_rna = 0 #variable for counting only reads that map to rna
    for item in intergeniclist:
        #go through list of intergenics, get counts of mapped reads
        strand="+"
        start=item[2]
        end=item[3]
        length[item]=end-start+1
        total,plus,minus = count_reads_in_region(chromosome_name,start,end)
        reads_in_region[item]=total
        sense_in_region[item]=plus
        antisense_in_region[item]=minus
    for item in orflist:
        #go through list of ORFs, get counts of mapped reads, keep track of totals
        strand=item[1]
        start=item[2]
        end=item[3]
        length[item]=end-start+1
        total,plus,minus = count_reads_in_region(chromosome_name,start,end)
        reads_in_region[item]=total
        #assuming dUTP directionality... read should be from second strand
        if(strand=="-"):
        # if(strand=="+"):
            sense_in_region[item]=plus
            antisense_in_region[item]=minus
        else:
            sense_in_region[item]=minus
            antisense_in_region[item]=plus
        if "rna" in item[0]:
            myreads_rna += reads_in_region[item]
    #Prepare for final output - make ordered master list of all regions tested
    allregions = intergeniclist+orflist
    allregions = sorted(allregions, key=itemgetter(2))
    print " Reads mapped to this genome = "+str(myreads)
    print " Non-RNA reads in this genome = "+str((myreads-myreads_rna))
    totalreads += myreads
    totalreads_rna += myreads_rna
    return allregions,length
def check_sam_header(infile):
    #Go through header of SAM file and parse out the genome reference name and the length of that genome
    #Returns a list with tuples of (name,length) pairs in case multiple references are submitted
    #In SAM format, SN defines reference seq name, LN is length; this has been tested with SAM files generated by BWA only
    reference_length_list = list()
    #"rU" mode was removed in Python 3; plain "r" gives universal newlines there
    samhandle = open(infile, "r")
    for line in samhandle:
        if not line.startswith("@"):
            #the header is always first, so stop at the first alignment record
            break
        elif ("@SQ" in line): #reference-sequence header line
            #raw-string patterns; \S+ / \d+ do not require trailing
            #whitespace, so a final field without a newline still parses
            m = re.search(r"SN:(\S+)", line)
            n = re.search(r"LN:(\d+)", line)
            if m and n: #skip malformed @SQ lines instead of crashing on .group()
                reference_length_list.append((m.group(1), int(n.group(1))))
    samhandle.close()
    return reference_length_list
if __name__ == "__main__":
#Main program loop
usage="Usage: RNAseq_parser.py -i <input SAM-formatted alignment file> -o <output> -d <directory containing genome definition files in gff format, named chromosomename.gff> -p <print out intergenic region data>"
#Check that all options have been passed
if not len(sys.argv) >= 5:
sys.exit("Error: not all arguments specified\n"+usage)
parser = OptionParser(usage)
parser.add_option("-i",dest="infile_sam",help="input SAM-formatted alignment file")
parser.add_option("-o",dest="outfile",help="output file name")
parser.add_option("-d",dest="dirname",help="directory containing genome definition files",default="./")
parser.add_option("-p",action="store_true",dest="intergenics",help="print values for intergenic regions",default=False)
(options, args) = parser.parse_args()
#Output
output_handle = open(options.outfile, "w")
dirname=options.dirname
printIntergenics=options.intergenics
#check header of sam file
#get length of reference sequence for determining intergenics
reference_length_list=check_sam_header(options.infile_sam)
#check to see if BAM indexed files exist, otherwise make them; also verify that genome table files exist before we begin processing
bamfilename = check_files(options.infile_sam,reference_length_list)
#create samfile object for all of the read counting... global
samfile = pysam.Samfile(bamfilename, 'rb')
#main loop for counting reads mapping to each reference chromosome in file
for items in reference_length_list:
reference_length = | |
= nodeNumber
self.referencePosition = referencePosition
self.referenceRotation = referenceRotation
self.visualization = visualization
    def __iter__(self):
        """Yield (key, value) pairs so that dict(item) gives the item's full
        parameter dictionary; visualization settings are flattened into keys
        carrying a 'V' prefix."""
        yield 'objectType', 'Mass1D'
        yield 'name', self.name
        yield 'physicsMass', self.physicsMass
        yield 'nodeNumber', self.nodeNumber
        yield 'referencePosition', self.referencePosition
        yield 'referenceRotation', self.referenceRotation
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VgraphicsData', dict(self.visualization)["graphicsData"]
#short-name aliases (typedefs) for the object and its visualization class:
Mass1D = ObjectMass1D
VMass1D = VObjectMass1D
class VObjectRotationalMass1D:
    """Visualization settings for ObjectRotationalMass1D."""
    def __init__(self, show = True, graphicsData = None):
        # show: set False to hide the object in the renderer
        self.show = show
        # fresh list per instance; a mutable default argument would be
        # shared by every instance constructed without graphicsData
        self.graphicsData = [] if graphicsData is None else graphicsData
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) returns all settings."""
        yield 'show', self.show
        yield 'graphicsData', self.graphicsData
class ObjectRotationalMass1D:
    """1D rotational inertia (rotor) object attached to a node.

    Mutable defaults (lists, visualization dict, reference rotation) are
    created per instance instead of as shared default-argument objects;
    the observable default values are unchanged.
    """
    def __init__(self, name = '', physicsInertia = 0., nodeNumber = -1, referencePosition = None, referenceRotation = None, visualization = None):
        self.name = name
        self.physicsInertia = physicsInertia
        self.nodeNumber = nodeNumber
        self.referencePosition = [0.,0.,0.] if referencePosition is None else referencePosition
        # default reference rotation: a fresh 3x3 identity matrix per instance
        self.referenceRotation = IIDiagMatrix(rowsColumns=3,value=1) if referenceRotation is None else referenceRotation
        self.visualization = {'show': True, 'graphicsData': []} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) gives the full parameter dictionary."""
        yield 'objectType', 'RotationalMass1D'
        yield 'name', self.name
        yield 'physicsInertia', self.physicsInertia
        yield 'nodeNumber', self.nodeNumber
        yield 'referencePosition', self.referencePosition
        yield 'referenceRotation', self.referenceRotation
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VgraphicsData', dict(self.visualization)["graphicsData"]
#short-name aliases (typedefs) for the object and its visualization class:
Rotor1D = ObjectRotationalMass1D
VRotor1D = VObjectRotationalMass1D
class VObjectRigidBody:
    """Visualization settings for ObjectRigidBody."""
    def __init__(self, show = True, graphicsDataUserFunction = 0, graphicsData = None):
        self.show = show
        self.graphicsDataUserFunction = graphicsDataUserFunction
        # fresh list per instance instead of a shared mutable default
        self.graphicsData = [] if graphicsData is None else graphicsData
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) returns all settings."""
        yield 'show', self.show
        yield 'graphicsDataUserFunction', self.graphicsDataUserFunction
        yield 'graphicsData', self.graphicsData
class ObjectRigidBody:
    """Rigid body object with mass, six inertia components and center of mass.

    Mutable defaults are created per instance instead of as shared
    default-argument objects; observable default values are unchanged.
    """
    def __init__(self, name = '', physicsMass = 0., physicsInertia = None, physicsCenterOfMass = None, nodeNumber = -1, visualization = None):
        self.name = name
        self.physicsMass = physicsMass
        # six inertia components -- fresh list per instance
        self.physicsInertia = [0.,0.,0., 0.,0.,0.] if physicsInertia is None else physicsInertia
        self.physicsCenterOfMass = [0.,0.,0.] if physicsCenterOfMass is None else physicsCenterOfMass
        self.nodeNumber = nodeNumber
        self.visualization = {'show': True, 'graphicsDataUserFunction': 0, 'graphicsData': []} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) gives the full parameter dictionary."""
        yield 'objectType', 'RigidBody'
        yield 'name', self.name
        yield 'physicsMass', self.physicsMass
        yield 'physicsInertia', self.physicsInertia
        yield 'physicsCenterOfMass', self.physicsCenterOfMass
        yield 'nodeNumber', self.nodeNumber
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VgraphicsDataUserFunction', dict(self.visualization)["graphicsDataUserFunction"]
        yield 'VgraphicsData', dict(self.visualization)["graphicsData"]
# Short aliases for convenient usage of the item and its visualization class:
RigidBody = ObjectRigidBody
VRigidBody = VObjectRigidBody
class VObjectRigidBody2D:
    """Visualization properties for ObjectRigidBody2D."""
    def __init__(self, show = True, graphicsDataUserFunction = 0, graphicsData = None):
        self.show = show
        self.graphicsDataUserFunction = graphicsDataUserFunction
        # None sentinel avoids a shared mutable default list across instances
        self.graphicsData = [] if graphicsData is None else graphicsData
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'graphicsDataUserFunction', self.graphicsDataUserFunction
        yield 'graphicsData', self.graphicsData
class ObjectRigidBody2D:
    """A planar (2D) rigid body item with mass and scalar inertia.

    The visualization default is created per instance (None sentinel)
    instead of a shared mutable default dict.
    """
    def __init__(self, name = '', physicsMass = 0., physicsInertia = 0., nodeNumber = -1, visualization = None):
        self.name = name
        self.physicsMass = physicsMass
        self.physicsInertia = physicsInertia
        self.nodeNumber = nodeNumber
        self.visualization = {'show': True, 'graphicsDataUserFunction': 0, 'graphicsData': []} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'RigidBody2D'
        yield 'name', self.name
        yield 'physicsMass', self.physicsMass
        yield 'physicsInertia', self.physicsInertia
        yield 'nodeNumber', self.nodeNumber
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VgraphicsDataUserFunction', dict(self.visualization)["graphicsDataUserFunction"]
        yield 'VgraphicsData', dict(self.visualization)["graphicsData"]
# Short aliases for convenient usage of the item and its visualization class:
RigidBody2D = ObjectRigidBody2D
VRigidBody2D = VObjectRigidBody2D
class VObjectGenericODE2:
    """Visualization properties for ObjectGenericODE2."""
    def __init__(self, show = True, color = None, triangleMesh = None, showNodes = False, graphicsDataUserFunction = 0):
        self.show = show
        # None sentinels avoid shared mutable default lists across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
        self.triangleMesh = [] if triangleMesh is None else triangleMesh
        self.showNodes = showNodes
        self.graphicsDataUserFunction = graphicsDataUserFunction
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'color', self.color
        yield 'triangleMesh', self.triangleMesh
        yield 'showNodes', self.showNodes
        yield 'graphicsDataUserFunction', self.graphicsDataUserFunction
class ObjectGenericODE2:
    """Generic second-order ODE object defined by user-supplied system matrices.

    All matrix/vector defaults are created per instance (None sentinels)
    instead of shared mutable default arguments.
    """
    def __init__(self, name = '', nodeNumbers = None, massMatrix = None, stiffnessMatrix = None, dampingMatrix = None, forceVector = None, forceUserFunction = 0, massMatrixUserFunction = 0, visualization = None):
        self.name = name
        self.nodeNumbers = [] if nodeNumbers is None else nodeNumbers
        self.massMatrix = [] if massMatrix is None else massMatrix
        self.stiffnessMatrix = [] if stiffnessMatrix is None else stiffnessMatrix
        self.dampingMatrix = [] if dampingMatrix is None else dampingMatrix
        self.forceVector = [] if forceVector is None else forceVector
        self.forceUserFunction = forceUserFunction
        self.massMatrixUserFunction = massMatrixUserFunction
        self.visualization = {'show': True, 'color': [-1.,-1.,-1.,-1.], 'triangleMesh': [], 'showNodes': False, 'graphicsDataUserFunction': 0} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'GenericODE2'
        yield 'name', self.name
        yield 'nodeNumbers', self.nodeNumbers
        yield 'massMatrix', self.massMatrix
        yield 'stiffnessMatrix', self.stiffnessMatrix
        yield 'dampingMatrix', self.dampingMatrix
        yield 'forceVector', self.forceVector
        yield 'forceUserFunction', self.forceUserFunction
        yield 'massMatrixUserFunction', self.massMatrixUserFunction
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'Vcolor', dict(self.visualization)["color"]
        yield 'VtriangleMesh', dict(self.visualization)["triangleMesh"]
        yield 'VshowNodes', dict(self.visualization)["showNodes"]
        yield 'VgraphicsDataUserFunction', dict(self.visualization)["graphicsDataUserFunction"]
class VObjectFFRF:
    """Visualization properties for ObjectFFRF."""
    def __init__(self, show = True, color = None, triangleMesh = None, showNodes = False):
        self.show = show
        # None sentinels avoid shared mutable default lists across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
        self.triangleMesh = [] if triangleMesh is None else triangleMesh
        self.showNodes = showNodes
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'color', self.color
        yield 'triangleMesh', self.triangleMesh
        yield 'showNodes', self.showNodes
class ObjectFFRF:
    """Floating frame of reference formulation (FFRF) object.

    All matrix/vector defaults are created per instance (None sentinels)
    instead of shared mutable default arguments.
    """
    def __init__(self, name = '', nodeNumbers = None, massMatrixFF = None, stiffnessMatrixFF = None, dampingMatrixFF = None, forceVector = None, forceUserFunction = 0, massMatrixUserFunction = 0, computeFFRFterms = True, objectIsInitialized = False, visualization = None):
        self.name = name
        self.nodeNumbers = [] if nodeNumbers is None else nodeNumbers
        self.massMatrixFF = [] if massMatrixFF is None else massMatrixFF
        self.stiffnessMatrixFF = [] if stiffnessMatrixFF is None else stiffnessMatrixFF
        self.dampingMatrixFF = [] if dampingMatrixFF is None else dampingMatrixFF
        self.forceVector = [] if forceVector is None else forceVector
        self.forceUserFunction = forceUserFunction
        self.massMatrixUserFunction = massMatrixUserFunction
        self.computeFFRFterms = computeFFRFterms
        self.objectIsInitialized = objectIsInitialized
        self.visualization = {'show': True, 'color': [-1.,-1.,-1.,-1.], 'triangleMesh': [], 'showNodes': False} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'FFRF'
        yield 'name', self.name
        yield 'nodeNumbers', self.nodeNumbers
        yield 'massMatrixFF', self.massMatrixFF
        yield 'stiffnessMatrixFF', self.stiffnessMatrixFF
        yield 'dampingMatrixFF', self.dampingMatrixFF
        yield 'forceVector', self.forceVector
        yield 'forceUserFunction', self.forceUserFunction
        yield 'massMatrixUserFunction', self.massMatrixUserFunction
        yield 'computeFFRFterms', self.computeFFRFterms
        yield 'objectIsInitialized', self.objectIsInitialized
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'Vcolor', dict(self.visualization)["color"]
        yield 'VtriangleMesh', dict(self.visualization)["triangleMesh"]
        yield 'VshowNodes', dict(self.visualization)["showNodes"]
class VObjectFFRFreducedOrder:
    """Visualization properties for ObjectFFRFreducedOrder."""
    def __init__(self, show = True, color = None, triangleMesh = None, showNodes = False):
        self.show = show
        # None sentinels avoid shared mutable default lists across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
        self.triangleMesh = [] if triangleMesh is None else triangleMesh
        self.showNodes = showNodes
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'color', self.color
        yield 'triangleMesh', self.triangleMesh
        yield 'showNodes', self.showNodes
class ObjectFFRFreducedOrder:
    """Reduced-order (CMS) floating frame of reference formulation object.

    All matrix/vector defaults are created per instance (None sentinels)
    instead of shared mutable default arguments.
    """
    def __init__(self, name = '', nodeNumbers = None, massMatrixReduced = None, stiffnessMatrixReduced = None, dampingMatrixReduced = None, forceUserFunction = 0, massMatrixUserFunction = 0, computeFFRFterms = True, modeBasis = None, outputVariableModeBasis = None, outputVariableTypeModeBasis = 0, referencePositions = None, visualization = None):
        self.name = name
        self.nodeNumbers = [] if nodeNumbers is None else nodeNumbers
        self.massMatrixReduced = [] if massMatrixReduced is None else massMatrixReduced
        self.stiffnessMatrixReduced = [] if stiffnessMatrixReduced is None else stiffnessMatrixReduced
        self.dampingMatrixReduced = [] if dampingMatrixReduced is None else dampingMatrixReduced
        self.forceUserFunction = forceUserFunction
        self.massMatrixUserFunction = massMatrixUserFunction
        self.computeFFRFterms = computeFFRFterms
        self.modeBasis = [] if modeBasis is None else modeBasis
        self.outputVariableModeBasis = [] if outputVariableModeBasis is None else outputVariableModeBasis
        self.outputVariableTypeModeBasis = outputVariableTypeModeBasis
        self.referencePositions = [] if referencePositions is None else referencePositions
        self.visualization = {'show': True, 'color': [-1.,-1.,-1.,-1.], 'triangleMesh': [], 'showNodes': False} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'FFRFreducedOrder'
        yield 'name', self.name
        yield 'nodeNumbers', self.nodeNumbers
        yield 'massMatrixReduced', self.massMatrixReduced
        yield 'stiffnessMatrixReduced', self.stiffnessMatrixReduced
        yield 'dampingMatrixReduced', self.dampingMatrixReduced
        yield 'forceUserFunction', self.forceUserFunction
        yield 'massMatrixUserFunction', self.massMatrixUserFunction
        yield 'computeFFRFterms', self.computeFFRFterms
        yield 'modeBasis', self.modeBasis
        yield 'outputVariableModeBasis', self.outputVariableModeBasis
        yield 'outputVariableTypeModeBasis', self.outputVariableTypeModeBasis
        yield 'referencePositions', self.referencePositions
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'Vcolor', dict(self.visualization)["color"]
        yield 'VtriangleMesh', dict(self.visualization)["triangleMesh"]
        yield 'VshowNodes', dict(self.visualization)["showNodes"]
# Short aliases for convenient usage of the item and its visualization class:
CMSobject = ObjectFFRFreducedOrder
VCMSobject = VObjectFFRFreducedOrder
class VObjectANCFCable2D:
    """Visualization properties for ObjectANCFCable2D."""
    def __init__(self, show = True, drawHeight = 0., color = None):
        self.show = show
        self.drawHeight = drawHeight
        # None sentinel avoids a shared mutable default list across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'drawHeight', self.drawHeight
        yield 'color', self.color
class ObjectANCFCable2D:
    """Planar ANCF cable element (2D) with bending and axial stiffness/damping.

    Mutable defaults (node list, visualization dict) are created per
    instance (None sentinels) instead of shared mutable default arguments.
    """
    def __init__(self, name = '', physicsLength = 0., physicsMassPerLength = 0., physicsBendingStiffness = 0., physicsAxialStiffness = 0., physicsBendingDamping = 0., physicsAxialDamping = 0., physicsReferenceAxialStrain = 0., physicsReferenceCurvature = 0., nodeNumbers = None, useReducedOrderIntegration = False, visualization = None):
        self.name = name
        self.physicsLength = physicsLength
        self.physicsMassPerLength = physicsMassPerLength
        self.physicsBendingStiffness = physicsBendingStiffness
        self.physicsAxialStiffness = physicsAxialStiffness
        self.physicsBendingDamping = physicsBendingDamping
        self.physicsAxialDamping = physicsAxialDamping
        self.physicsReferenceAxialStrain = physicsReferenceAxialStrain
        self.physicsReferenceCurvature = physicsReferenceCurvature
        self.nodeNumbers = [-1, -1] if nodeNumbers is None else nodeNumbers
        self.useReducedOrderIntegration = useReducedOrderIntegration
        self.visualization = {'show': True, 'drawHeight': 0., 'color': [-1.,-1.,-1.,-1.]} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'ANCFCable2D'
        yield 'name', self.name
        yield 'physicsLength', self.physicsLength
        yield 'physicsMassPerLength', self.physicsMassPerLength
        yield 'physicsBendingStiffness', self.physicsBendingStiffness
        yield 'physicsAxialStiffness', self.physicsAxialStiffness
        yield 'physicsBendingDamping', self.physicsBendingDamping
        yield 'physicsAxialDamping', self.physicsAxialDamping
        yield 'physicsReferenceAxialStrain', self.physicsReferenceAxialStrain
        yield 'physicsReferenceCurvature', self.physicsReferenceCurvature
        yield 'nodeNumbers', self.nodeNumbers
        yield 'useReducedOrderIntegration', self.useReducedOrderIntegration
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VdrawHeight', dict(self.visualization)["drawHeight"]
        yield 'Vcolor', dict(self.visualization)["color"]
# Short aliases for convenient usage of the item and its visualization class:
Cable2D = ObjectANCFCable2D
VCable2D = VObjectANCFCable2D
class VObjectALEANCFCable2D:
    """Visualization properties for ObjectALEANCFCable2D."""
    def __init__(self, show = True, drawHeight = 0., color = None):
        self.show = show
        self.drawHeight = drawHeight
        # None sentinel avoids a shared mutable default list across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'drawHeight', self.drawHeight
        yield 'color', self.color
class ObjectALEANCFCable2D:
    """Planar ANCF cable element with an ALE (moving material) coordinate.

    Mutable defaults (node list, visualization dict) are created per
    instance (None sentinels) instead of shared mutable default arguments.
    """
    def __init__(self, name = '', physicsLength = 0., physicsMassPerLength = 0., physicsMovingMassFactor = 1., physicsBendingStiffness = 0., physicsAxialStiffness = 0., physicsBendingDamping = 0., physicsAxialDamping = 0., physicsReferenceAxialStrain = 0., physicsReferenceCurvature = 0., physicsUseCouplingTerms = True, nodeNumbers = None, useReducedOrderIntegration = False, visualization = None):
        self.name = name
        self.physicsLength = physicsLength
        self.physicsMassPerLength = physicsMassPerLength
        self.physicsMovingMassFactor = physicsMovingMassFactor
        self.physicsBendingStiffness = physicsBendingStiffness
        self.physicsAxialStiffness = physicsAxialStiffness
        self.physicsBendingDamping = physicsBendingDamping
        self.physicsAxialDamping = physicsAxialDamping
        self.physicsReferenceAxialStrain = physicsReferenceAxialStrain
        self.physicsReferenceCurvature = physicsReferenceCurvature
        self.physicsUseCouplingTerms = physicsUseCouplingTerms
        self.nodeNumbers = [-1, -1, -1] if nodeNumbers is None else nodeNumbers
        self.useReducedOrderIntegration = useReducedOrderIntegration
        self.visualization = {'show': True, 'drawHeight': 0., 'color': [-1.,-1.,-1.,-1.]} if visualization is None else visualization
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports all item data."""
        yield 'objectType', 'ALEANCFCable2D'
        yield 'name', self.name
        yield 'physicsLength', self.physicsLength
        yield 'physicsMassPerLength', self.physicsMassPerLength
        yield 'physicsMovingMassFactor', self.physicsMovingMassFactor
        yield 'physicsBendingStiffness', self.physicsBendingStiffness
        yield 'physicsAxialStiffness', self.physicsAxialStiffness
        yield 'physicsBendingDamping', self.physicsBendingDamping
        yield 'physicsAxialDamping', self.physicsAxialDamping
        yield 'physicsReferenceAxialStrain', self.physicsReferenceAxialStrain
        yield 'physicsReferenceCurvature', self.physicsReferenceCurvature
        yield 'physicsUseCouplingTerms', self.physicsUseCouplingTerms
        yield 'nodeNumbers', self.nodeNumbers
        yield 'useReducedOrderIntegration', self.useReducedOrderIntegration
        yield 'Vshow', dict(self.visualization)["show"]
        yield 'VdrawHeight', dict(self.visualization)["drawHeight"]
        yield 'Vcolor', dict(self.visualization)["color"]
# Short aliases for convenient usage of the item and its visualization class:
ALECable2D = ObjectALEANCFCable2D
VALECable2D = VObjectALEANCFCable2D
class VObjectGround:
    """Visualization properties for ObjectGround."""
    def __init__(self, show = True, graphicsDataUserFunction = 0, color = None, graphicsData = None):
        self.show = show
        self.graphicsDataUserFunction = graphicsDataUserFunction
        # None sentinels avoid shared mutable default lists across instances
        self.color = [-1.,-1.,-1.,-1.] if color is None else color
        self.graphicsData = [] if graphicsData is None else graphicsData
    def __iter__(self):
        """Yield (key, value) pairs so dict(obj) exports the settings."""
        yield 'show', self.show
        yield 'graphicsDataUserFunction', self.graphicsDataUserFunction
        yield 'color', self.color
        yield 'graphicsData', self.graphicsData
class ObjectGround:
def __init__(self, name = '', referencePosition = | |
is not None:
out = d_mini[self._invs[0]] # AM
out[self._dipoles[0]] -= d_mini[self._invs[1]] # AN
out[self._dipoles[1]] -= d_mini[self._invs[2]] # BM
out[self._dipoles[0] & self._dipoles[1]] += d_mini[self._invs[3]] # BN
else:
out = d_mini
return out
def _mini_survey_dataT(self, v):
    """Adjoint of the mini-survey data mapping: scatter combined
    dipole-dipole data values back onto the unique (mini) survey entries.

    ``v`` is a data vector ordered like the full survey; the four index
    arrays in ``self._invs`` map the A-M, A-N, B-M and B-N electrode
    combinations, with ``self._dipoles`` masking which sources/receivers
    are dipoles.  Returns ``v`` unchanged when no mini survey is in use.
    """
    if self._mini_survey is not None:
        out = np.zeros(self._mini_survey.nD)
        # Need to use ufunc.at because there could be repeated indices
        # That need to be properly handled.
        np.add.at(out, self._invs[0], v) # AM
        np.subtract.at(out, self._invs[1], v[self._dipoles[0]]) # AN
        np.subtract.at(out, self._invs[2], v[self._dipoles[1]]) # BM
        np.add.at(out, self._invs[3], v[self._dipoles[0] & self._dipoles[1]]) # BN
        return out
    else:
        # no mini survey: the mapping is the identity
        out = v
        return out
####################################################
# Mass Matrices
####################################################
@property
def MnSigma(self):
    """
    Node inner product matrix for \\(\\sigma\\). Used in the E-B
    formulation.  Cached on first access in ``self._MnSigma``.
    """
    # TODO: only works isotropic sigma
    if getattr(self, "_MnSigma", None) is None:
        sigma = self.sigma
        vol = self.mesh.vol
        # project cell-volume-weighted sigma to nodes, then form a diagonal matrix
        self._MnSigma = sdiag(self.mesh.aveN2CC.T * (vol * sigma))
    return self._MnSigma
@property
def MnSigmaDerivMat(self):
    """
    Derivative of MnSigma with respect to the model.
    Cached on first access in ``self._MnSigmaDerivMat``.
    """
    if getattr(self, "_MnSigmaDerivMat", None) is None:
        vol = self.mesh.vol
        self._MnSigmaDerivMat = self.mesh.aveN2CC.T * sdiag(vol) * self.sigmaDeriv
    return self._MnSigmaDerivMat
def MnSigmaDeriv(self, u, v, adjoint=False):
    """
    Derivative of MnSigma with respect to the model times a vector (u).

    When ``adjoint`` is True the transpose of the operator is applied.
    ``storeInnerProduct`` selects between the cached derivative matrix and
    an on-the-fly computation using ``sigmaDeriv``.
    """
    if v.ndim > 1:
        # broadcast u against multiple right-hand-side columns in v
        u = u[:, None]
    if self.storeInnerProduct:
        if adjoint:
            return self.MnSigmaDerivMat.T * (u * v)
        else:
            return u * (self.MnSigmaDerivMat * v)
    else:
        vol = self.mesh.vol
        if v.ndim > 1:
            vol = vol[:, None]
        if adjoint:
            return self.sigmaDeriv.T * (vol * (self.mesh.aveN2CC * (u * v)))
        else:
            dsig_dm_v = self.sigmaDeriv * v
            return u * (self.mesh.aveN2CC.T * (vol * dsig_dm_v))
@property
def MccRhoi(self):
    """
    Cell inner product matrix for \\(\\rho^{-1}\\). Used in the H-J
    formulation.  Cached on first access in ``self._MccRhoi``.
    """
    # TODO: only works isotropic rho
    if getattr(self, "_MccRhoi", None) is None:
        self._MccRhoi = sdiag(self.mesh.vol / self.rho)
    return self._MccRhoi
@property
def MccRhoiDerivMat(self):
    """
    Derivative of MccRhoi with respect to the model.
    Cached on first access in ``self._MccRhoiDerivMat``.
    """
    if getattr(self, "_MccRhoiDerivMat", None) is None:
        rho = self.rho
        vol = self.mesh.vol
        # d(vol/rho)/d(rho) = -vol/rho^2, chained with d(rho)/d(m)
        self._MccRhoiDerivMat = sdiag(vol * (-1.0 / rho ** 2)) * self.rhoDeriv
    return self._MccRhoiDerivMat
def MccRhoiDeriv(self, u, v, adjoint=False):
    """
    Derivative of :code:`MccRhoi` with respect to the model, applied to
    a vector.  Returns ``Zero()`` when rho is not an inverted parameter.
    When ``adjoint`` is True the transpose of the operator is applied.
    """
    if self.rhoMap is None:
        # rho is fixed -> derivative w.r.t. the model is identically zero
        return Zero()
    if len(self.rho.shape) > 1:
        if self.rho.shape[1] > self.mesh.dim:
            raise NotImplementedError(
                "Full anisotropy is not implemented for MccRhoiDeriv."
            )
    if self.storeInnerProduct:
        if adjoint:
            return self.MccRhoiDerivMat.T * (sdiag(u) * v)
        else:
            return sdiag(u) * (self.MccRhoiDerivMat * v)
    else:
        vol = self.mesh.vol
        rho = self.rho
        if adjoint:
            return self.rhoDeriv.T * (sdiag(u * vol * (-1.0 / rho ** 2)) * v)
        else:
            return (sdiag(u * vol * (-1.0 / rho ** 2))) * (self.rhoDeriv * v)
class Simulation2DCellCentered(BaseDCSimulation2D):
    """
    2.5D cell centered DC problem.

    Potentials live at cell centers (H-J formulation, currents on faces);
    the system is assembled per Fourier wavenumber ``ky``.
    """
    _solutionType = "phiSolution"
    _formulation = "HJ"  # CC potentials means J is on faces
    fieldsPair = Fields2DCellCentered
    fieldsPair_fwd = Fields3DCellCentered
    bc_type = properties.StringChoice(
        "Type of boundary condition to use for simulation. Note that Robin and Mixed "
        "are equivalent.",
        choices=["Dirichlet", "Neumann", "Robin", "Mixed"],
        default="Robin",
    )
    def __init__(self, mesh, **kwargs):
        BaseDCSimulation2D.__init__(self, mesh, **kwargs)
        # volume-weighted divergence; Grad is its (weak-form) transpose
        V = sdiag(self.mesh.cell_volumes)
        self.Div = V @ self.mesh.face_divergence
        self.Grad = self.Div.T
    def getA(self, ky):
        """
        Make the A matrix for the cell centered DC resistivity problem
        A = D MfRhoI G + ky^2 MccRhoi   (per wavenumber ky)
        """
        # To handle Mixed boundary condition
        self.setBC(ky=ky)
        D = self.Div
        G = self.Grad
        if self.bc_type != "Dirichlet":
            # subtract the Robin/Neumann boundary operator from the gradient
            G = G - self._MBC[ky]
        MfRhoI = self.MfRhoI
        # Get resistivity rho
        A = D * MfRhoI * G + ky ** 2 * self.MccRhoi
        if self.bc_type == "Neumann":
            # pin one DOF to remove the constant null space of the pure-Neumann system
            A[0, 0] = A[0, 0] + 1.0
        return A
    def getADeriv(self, ky, u, v, adjoint=False):
        """Derivative of A(ky)*u with respect to the model, applied to v."""
        D = self.Div
        G = self.Grad
        if self.bc_type != "Dirichlet":
            G = G - self._MBC[ky]
        if adjoint:
            return self.MfRhoIDeriv(
                G * u.flatten(), D.T * v, adjoint=adjoint
            ) + ky ** 2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
        else:
            return D * self.MfRhoIDeriv(
                G * u.flatten(), v, adjoint=adjoint
            ) + ky ** 2 * self.MccRhoiDeriv(u.flatten(), v, adjoint=adjoint)
    def getRHS(self, ky):
        """
        RHS for the DC problem: the source term q for wavenumber ky.
        """
        RHS = self.getSourceTerm(ky)
        return RHS
    def getRHSDeriv(self, ky, src, v, adjoint=False):
        """
        Derivative of the right hand side with respect to the model
        (currently zero: the source does not depend on the model).
        """
        # TODO: add qDeriv for RHS depending on m
        # qDeriv = src.evalDeriv(self, ky, adjoint=adjoint)
        # return qDeriv
        return Zero()
    def setBC(self, ky=None):
        """Build and cache the boundary-condition matrix for wavenumber ky.

        For Robin/Mixed conditions, alpha is derived from the asymptotic
        behavior of the 2.5D kernel (ratio of modified Bessel functions),
        assuming a point source at the middle of the top of the mesh.
        """
        if self.bc_type == "Dirichlet":
            return
        if getattr(self, "_MBC", None) is None:
            self._MBC = {}
        if ky in self._MBC:
            # I have already created the BC matrix for this wavenumber
            return
        if self.bc_type == "Neumann":
            alpha, beta, gamma = 0, 1, 0
        else:
            mesh = self.mesh
            boundary_faces = mesh.boundary_faces
            boundary_normals = mesh.boundary_face_outward_normals
            n_bf = len(boundary_faces)
            # Top gets 0 Neumann
            alpha = np.zeros(n_bf)
            beta = np.ones(n_bf)
            gamma = 0
            # assume a source point at the middle of the top of the mesh
            middle = np.median(mesh.nodes, axis=0)
            top_v = np.max(mesh.nodes[:, -1])
            source_point = np.r_[middle[:-1], top_v]
            r_vec = boundary_faces - source_point
            r = np.linalg.norm(r_vec, axis=-1)
            r_hat = r_vec / r[:, None]
            r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)
            # determine faces that are on the sides and bottom of the mesh...
            if mesh._meshType.lower() == "tree":
                not_top = boundary_faces[:, -1] != top_v
            else:
                # mesh faces are ordered, faces_x, faces_y, faces_z so...
                is_b = make_boundary_bool(mesh.shape_faces_y)
                is_t = np.zeros(mesh.shape_faces_y, dtype=bool, order="F")
                is_t[:, -1] = True
                is_t = is_t.reshape(-1, order="F")[is_b]
                not_top = np.zeros(boundary_faces.shape[0], dtype=bool)
                not_top[-len(is_t) :] = ~is_t
            # use the exponentialy scaled modified bessel function of second kind,
            # (the division will cancel out the scaling)
            # This is more stable for large values of ky * r
            # actual ratio is k1/k0...
            alpha[not_top] = (ky * k1e(ky * r) / k0e(ky * r) * r_dot_n)[not_top]
        B, bc = self.mesh.cell_gradient_weak_form_robin(alpha, beta, gamma)
        # bc should always be 0 because gamma was always 0 above
        self._MBC[ky] = B
class Simulation2DNodal(BaseDCSimulation2D):
"""
2.5D nodal DC problem
"""
_solutionType = "phiSolution"
_formulation = "EB" # CC potentials means J is on faces
fieldsPair = Fields2DNodal
fieldsPair_fwd = Fields3DNodal
_gradT = None
bc_type = properties.StringChoice(
"Type of boundary condition to use for simulation. Note that Robin and Mixed "
"are equivalent.",
choices=["Neumann", "Robin", "Mixed"],
default="Robin",
)
def __init__(self, mesh, **kwargs):
    """Set up the nodal 2.5D simulation; the nodal system is symmetric
    positive definite, so hint that to the solver."""
    BaseDCSimulation2D.__init__(self, mesh, **kwargs)
    self.solver_opts["is_symmetric"] = True
    self.solver_opts["is_positive_definite"] = True
def getA(self, ky):
    """
    Make the A matrix for the nodal DC resistivity problem:
    A = G.T MeSigma G + ky^2 MnSigma  (plus a Robin boundary term unless
    the boundary condition is Neumann).
    """
    # To handle Mixed boundary condition
    self.setBC(ky=ky)
    MeSigma = self.MeSigma
    MnSigma = self.MnSigma
    Grad = self.mesh.nodalGrad
    if self._gradT is None:
        self._gradT = Grad.T.tocsr() # cache the .tocsr()
    GradT = self._gradT
    A = GradT * MeSigma * Grad + ky ** 2 * MnSigma
    if self.bc_type != "Neumann":
        try:
            # Robin/Mixed: add the boundary-averaged conductivity term
            A = A + sdiag(self._AvgBC[ky] @ self.sigma)
        except ValueError as err:
            if len(self.sigma) != len(self.mesh):
                raise NotImplementedError(
                    "Anisotropic conductivity is not supported for Robin boundary "
                    "conditions, please use 'Neumann'."
                )
            else:
                raise err
    return A
def getADeriv(self, ky, u, v, adjoint=False):
    """Derivative of A(ky)*u with respect to the model, applied to v,
    including the Robin boundary contribution when applicable."""
    Grad = self.mesh.nodalGrad
    if adjoint:
        out = self.MeSigmaDeriv(
            Grad * u.flatten(), Grad * v, adjoint=adjoint
        ) + ky ** 2 * self.MnSigmaDeriv(u.flatten(), v, adjoint=adjoint)
    else:
        out = Grad.T * self.MeSigmaDeriv(
            Grad * u.flatten(), v, adjoint=adjoint
        ) + ky ** 2 * self.MnSigmaDeriv(u.flatten(), v, adjoint=adjoint)
    if self.bc_type != "Neumann" and self.sigmaMap is not None:
        # derivative of the Robin boundary term, cached per wavenumber
        if getattr(self, "_MBC_sigma", None) is None:
            self._MBC_sigma = {}
        if ky not in self._MBC_sigma:
            self._MBC_sigma[ky] = self._AvgBC[ky] @ self.sigmaDeriv
        if not isinstance(u, Zero):
            u = u.flatten()
            if v.ndim > 1:
                # broadcast u against multiple right-hand-side columns
                u = u[:, None]
            if not adjoint:
                out += u * (self._MBC_sigma[ky] @ v)
            else:
                out += self._MBC_sigma[ky].T @ (u * v)
    return out
def getRHS(self, ky):
    """Return the right-hand side q of the DC problem for wavenumber ky."""
    return self.getSourceTerm(ky)
def getRHSDeriv(self, ky, src, v, adjoint=False):
    """Derivative of the right-hand side with respect to the model.

    The source term is currently model-independent, so this is
    identically zero.
    """
    # TODO: once sources depend on the model, return
    # src.evalDeriv(self, ky, adjoint=adjoint) here instead.
    return Zero()
def setBC(self, ky=None):
if self.bc_type == "Dirichlet":
# do nothing
raise ValueError(
"Dirichlet conditions are not | |
<gh_stars>0
# This script contains solver functions built on the cvxopt package:
# 1. Logistic: solves the logistic lasso (L1) regression problem using only features and labels (no comparisons included).
# 2. Log_Log: solves logistic lasso (L1) regression with the Bradley-Terry comparison model.
# 3. SVM_Log: solves an SVM for absolute labels combined with logistic regression for comparison labels.
from cvxopt import solvers, matrix, spdiag, log, exp, div, spmatrix
import numpy as np
import cvxopt.modeling as cvm
import sys
from sklearn.linear_model import LogisticRegression
def Logistic(absDataOrigin,absLabels, lamda,alpha=1):
    """Train an L1-penalized (lasso) logistic regression model on absolute labels.

    Solves  min_{beta, const}  alpha*sum(logisticLoss(absData)) + lamda*||beta||_1
    via cvxopt's nonlinear convex solver, using the standard epigraph trick
    (auxiliary variables t with  -t <= beta <= t  to encode the L1 norm).

    Parameters
    ----------
    absDataOrigin : N x d numpy matrix; N samples of dimension d.
    absLabels : (N,) numpy array; +1 for positive, -1 for negative labels.
    lamda : weight on the L1 penalty; larger values give more zeros in beta.
    alpha : weight on the logistic loss term (default 1).

    Returns
    -------
    beta : the logistic regression model parameter.
    const : the logistic regression global constant (intercept).
    """
    absN,d = np.shape(absDataOrigin)
    # append a column of ones so the intercept is folded into the data matrix
    absData = np.concatenate((np.array(absDataOrigin),np.ones([absN,1])),axis = 1)
    # A : y_i * x_i since y_i is a scalar
    A = np.multiply(absLabels.T, absData.T).T #absData must be in N, d matrix, and absLabels must be in (N,1) or (N,) matrix
    A = matrix(A)
    def F(x=None, z=None):
        # cvxopt cp oracle; variable layout:
        # beta without constant x[:d], constant x[d], t = x[d+1:]
        if x is None: return 2 * d, matrix(0.0,(2*d+1,1)) # m = 2 *d is the number of constraints
        e = A*x[:d+1] # 0 - d contains the constant
        w = exp(e)
        f = matrix(0.0,(2*d+1,1))
        f[0] = alpha*(-sum(e) + sum(log(1+w))) + lamda * sum(x[d+1::])# from d+1 withou the constant
        f[1:d+1] = x[:d] - x[d+1:] # beta - t < 0
        f[d+1:] = -x[:d] - x[d+1:] # -beta - t <0
        Df = matrix(0.0,(2*d+1,2*d+1))
        # gradient of the logistic loss w.r.t. (beta, const)
        Df[0, :d + 1] = alpha*(matrix(A.T * (div(w, 1 + w) - 1.0))).T
        Df[0,d+1:] = lamda
        Df[1:d+1,0:d] = spdiag(matrix(1.0,(d,1)))
        Df[d+1:, 0:d] = spdiag(matrix(-1.0,(d,1)))
        Df[1:d+1,d+1:] = spdiag(matrix(-1.0,(d,1)))
        Df[d+1:,d+1:] = spdiag(matrix(-1.0,(d,1)))
        if z is None: return f ,Df
        # Hessian: only the loss block is nonzero (constraints are linear)
        H = matrix(0.0,(2*d+1,2*d+1))
        H[0:d+1,0:d+1] = alpha*(A.T *spdiag(div(w, (1 + w) ** 2)) * A)
        return f, Df, z[0]*H
    solvers.options['show_progress'] = False
    sol = solvers.cp(F)
    beta, const = sol['x'][0:d], sol['x'][d]
    return beta, const
def Log_Log(absDataOrigin,absLabels,cmpDataOrigin,cmpLabels, absWeight, lamda):
    """Train an L1-penalized logistic model on both absolute and comparison labels.

    Solves
      min_{beta, const} absWeight*sum(logisticLoss(absData))
                        + (1-absWeight)*sum(logisticLoss(cmpData))
                        + lamda*||beta||_1
    When one of the weights is exactly 1 the problem reduces to plain L1
    logistic regression, which is delegated to sklearn; otherwise cvxopt's
    nonlinear solver is used with the epigraph trick for the L1 norm.

    Parameters
    ----------
    absDataOrigin : N x d numpy matrix of absolute-label samples.
    absLabels : (N,) numpy array; +1 positive, -1 negative.
    cmpDataOrigin : N x d numpy matrix of comparison-label samples
        (Bradley-Terry pairwise differences).
    cmpLabels : (N,) numpy array; +1 positive, -1 negative.
    absWeight : weight on the absolute-label loss; (1-absWeight) weights
        the comparison loss.
    lamda : weight on the L1 penalty; larger values give more zeros in beta.

    Returns
    -------
    beta : the logistic regression model parameter.
    const : the logistic regression global constant (intercept).
    """
    absN,d = np.shape(absDataOrigin)
    cmpN,_ = np.shape(cmpDataOrigin)
    cmpWeight = 1.0 - absWeight
    if cmpWeight == 1:
        # only comparison data matters -> plain sklearn L1 logistic regression
        # NOTE(review): newer sklearn requires solver='liblinear' (or 'saga')
        # for penalty='l1' - verify against the installed sklearn version.
        mdl = LogisticRegression(penalty='l1', C=1. / lamda)
        mdl.fit(cmpDataOrigin, cmpLabels)
        beta = mdl.coef_.T
        const = mdl.intercept_
        return beta, const
    elif absWeight == 1:
        # only absolute data matters -> plain sklearn L1 logistic regression
        mdl = LogisticRegression(penalty='l1', C=1. / lamda)
        mdl.fit(absDataOrigin, absLabels)
        beta = mdl.coef_.T
        const = mdl.intercept_
        return beta, const
    else:
        # append a column of ones so the intercept is folded into each data matrix
        absData = np.concatenate((np.array(absDataOrigin),np.ones([absN,1])),axis = 1)
        cmpData = np.concatenate((np.array(cmpDataOrigin),np.ones([cmpN,1])),axis=1)
        # A : y_i * x_i since y_i is a scalar
        absA = np.multiply(absLabels, absData.T).T # absData must be in N, d matrix, and absLabels must be in (N,1) or (N,) matrix
        absA = matrix(absA)
        cmpA = np.multiply(cmpLabels, cmpData.T).T # absData must be in N, d matrix, and absLabels must be in (N,1) or (N,) matrix
        cmpA = matrix(cmpA)
        def F(x=None, z=None):
            # cvxopt cp oracle; variable layout:
            # beta without constant x[:d], constant x[d], t = x[d+1:]
            if x is None: return 2 * d, matrix(0.0,(2*d+1,1)) # m = 2 *d is the number of constraints
            absE = absA*x[:d+1] # 0 - d contains the constant
            absW = exp(absE)
            cmpE = cmpA*x[:d+1]
            cmpW = exp(cmpE)
            f = matrix(0.0,(2*d+1,1))
            f[0] = absWeight*(-sum(absE) + sum(log(1+absW))) + cmpWeight*(-sum(cmpE) + sum(log(1+cmpW))) + lamda * sum(x[d+1:])# from d+1 withou the constant
            f[1:d+1] = x[:d] - x[d+1:] # beta - t < 0
            f[d+1:] = -x[:d] - x[d+1:] # -beta - t <0
            Df = matrix(0.0,(2*d+1,2*d+1))
            # weighted sum of the two logistic-loss gradients
            Df[0,:d+1] = absWeight*(matrix(absA.T * (div(absW,1+absW)-1.0))).T + cmpWeight*(matrix(cmpA.T * (div(cmpW,1+cmpW)-1.0))).T
            Df[0,d+1:] = lamda
            Df[1:d+1,0:d] = spdiag(matrix(1.0,(d,1)))
            Df[d+1:, 0:d] = spdiag(matrix(-1.0,(d,1)))
            Df[1:d+1,d+1:] = spdiag(matrix(-1.0,(d,1)))
            Df[d+1:,d+1:] = spdiag(matrix(-1.0,(d,1)))
            if z is None: return f ,Df
            # Hessian: weighted sum of the two logistic-loss Hessians
            H = matrix(0.0,(2*d+1,2*d+1))
            H[0:d+1,0:d+1] = absWeight*(absA.T *spdiag(div(absW, (1 + absW) ** 2)) * absA) + cmpWeight*(cmpA.T *spdiag(div(cmpW, (1 +cmpW) ** 2)) * cmpA)
            return f, Df, z[0]*H
        solvers.options['show_progress'] = False
        sol = solvers.cp(F)
        beta, const = sol['x'][0:d], sol['x'][d]
        return beta, const
def SVM_Log(absDataOrigin,absLabels,cmpDataOrigin,cmpLabels, absWeight, lamda,cmpIgnore=False):
# This function uses both absolute label data to train the SVM primal model and comparison data is using Bradley-Terry model.
# The comparison data and label must be included.
# Equation:
# Parameter:
# ------------
# absDataOrigin : N by d numpy matrix where N the number of absolute label data and d is the dimension of data
# abslabels : (N,) numpy array, +1 means positive label and -1 represents negative labels
# cmpDataOrigin : N by d numpy matrix where N the number of comparion label data and d is the dimension of data
# cmpLabels : (N,) numpy array, +1 means positive label and -1 represents negative labels
# absWeight : the Weight on absolute label data. And (1-absWeight) would be the weight on comparison data.
# lamda : weight on L1 penalty. Large lamda would have more zeros in beta.
# cmpIgnore : boolean variable. If True, the function will completely ignore the comparison data and labels. Please let them be None when input. And absWeight will be 1.
# Return:
# ------------
# beta : the SVM model parameter
# const : the SVM global constant.
if cmpIgnore is True:
# This component is currently wrong. Guess it is not satisfied the CVXOPT assumptions.
absWeight = 1
absN, d = np.shape(absDataOrigin)
absData = np.concatenate((np.array(absDataOrigin), np.ones([absN, 1])), axis=1)
# y_i times x_i and y_i is a scalar number
absA = np.multiply(absLabels,absData.T).T # absData must be in N, d matrix, and absLabels must be in (N,1) or (N,) matrix
absA = matrix(absA)
def F(x=None, z=None):
# The cvxopt matrix slicing does not include the last number.
# x[0:d] is beta; x[d] is const; x[d+1:2*d+1] is t ; x[2*d+1:] is zeta
if x is None: return 2 * d + 2*absN, matrix(0.0, (2*d+1+absN, 1))
absS = absA * x[:d + 1] # 0 - d contains the constant. Absolute label scores.
f = matrix(0.0,(2*d+1+2*absN,1))
f[0] = absWeight*sum(x[2*d+1:]) + lamda * sum(x[d+1:2*d+1])
f[1: d + 1] = x[:d] - x[d+1:2*d+1] # beta - t <= 0
f[d + 1: 2*d+1] = -x[:d] - x[d+1:2*d+1] # -beta - t <= 0
f[2*d+1:2*d+1+absN] = -absS-x[2*d+1:]+1 # -y_i(beta.T*x_i)-zeta_i+1 <=0
f[2*d+1+absN:] = -x[2*d+1:]
Df = matrix(0.0, (2*d+1+2*absN, 2*d+1+absN))
Df[0, d+1 : 2*d+1] = lamda
Df[0, 2*d+1:] = absWeight
Df[1 : d+1, 0:d] = spdiag(matrix(1.0, (d, 1)))
Df[d+1: 2*d+1, 0:d] = spdiag(matrix(-1.0, (d, 1)))
Df[1 : d+1, d+1 : 2*d+1] = spdiag(matrix(-1.0, (d, 1)))
Df[d+1 : 2*d+1, d+1 : 2*d+1] = spdiag(matrix(-1.0, (d, 1)))
Df[2*d+1:2*d+1+absN, 0:d+1] = -absA
Df[2*d+1:2*d+1+absN , 2*d+1:] = spdiag(matrix(-1.0,(absN,1)))
Df[2*d+1+absN:,2*d+1:] = spdiag(matrix(-1.0,(absN,1)))
if z is None: return f, Df
H = matrix(0.0, (2*d+1+absN, 2*d+1+absN))
return f, Df, z[0] * H
| |
# Plan Trial (+-), Plan Trial Metered Feature (+-),
# Plan After Trial (+), Metered Features After Trial (+)
document_entries = DocumentEntry.objects.all()
assert len(document_entries) == 6
doc = document_entries[0] # Plan trial (+)
assert doc.unit_price == Decimal('57.14')
doc = document_entries[1] # Plan trial (-)
assert doc.unit_price == Decimal('-57.14')
doc = document_entries[2] # Consumed mf (+)
assert doc.unit_price == metered_feature.price_per_unit
assert doc.quantity == mf_units_log_during_trial.consumed_units
doc = document_entries[3] # Consumed mf (-)
assert doc.unit_price == - metered_feature.price_per_unit
assert doc.quantity == mf_units_log_during_trial.consumed_units
doc = document_entries[4] # Plan after trial end
assert doc.unit_price == Decimal(20.0 / 28).quantize(Decimal('0.0000')) * plan.amount
doc = document_entries[5] # Consumed mf after trial
assert doc.unit_price == metered_feature.price_per_unit
assert doc.quantity == mf_units_log_after_trial.consumed_units
def test_canceled_subscription_with_metered_features_to_draft(self):
    """
    A canceled subscription whose plan was already billed up to the
    cancel date should only bill the remaining consumed metered features.

    Timeline:
        start_date = 2015-01-01
        trial_end = 2015-01-08
        last_billing_date = 2015-02-01
        cancel_date = 2015-02-28 (end of billing cycle)
        billing_date = 2015-03-01
    """
    billing_date = generate_docs_date('2015-03-01')
    metered_feature = MeteredFeatureFactory(included_units=Decimal('0.00'))
    plan = PlanFactory.create(interval='month', interval_count=1,
                              generate_after=120, enabled=True,
                              trial_period_days=7, amount=Decimal('200.00'),
                              metered_features=[metered_feature])
    start_date = dt.date(2015, 1, 1)
    subscription = SubscriptionFactory.create(plan=plan, start_date=start_date)
    subscription.activate()
    subscription.save()
    # consumption for February, which has not been billed yet
    mf_units_log = MeteredFeatureUnitsLogFactory(
        subscription=subscription, metered_feature=metered_feature,
        start_date=dt.datetime(2015, 2, 1),
        end_date=dt.datetime(2015, 2, 28)
    )
    subscription.cancel(when=Subscription.CANCEL_OPTIONS.END_OF_BILLING_CYCLE)
    subscription.cancel_date = dt.date(2015, 2, 28)
    subscription.save()
    # plan already billed through February; metered features only through January
    BillingLog.objects.create(subscription=subscription,
                              billing_date=dt.date(2015, 2, 1),
                              plan_billed_up_to=dt.date(2015, 2, 28),
                              metered_features_billed_up_to=dt.date(2015, 1, 31))
    call_command('generate_docs', billing_date=billing_date, stdout=self.output)
    # Expect one Proforma
    assert Proforma.objects.all().count() == 1
    assert Invoice.objects.all().count() == 0
    # Expect 1 entry:
    # Extra Metered Features (+)
    assert DocumentEntry.objects.all().count() == 1
    doc = DocumentEntry.objects.all()[0]
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == mf_units_log.consumed_units
def test_canceled_subscription_with_trial_and_trial_underflow(self):
    """
    A subscription that was canceled in the same month as it started,
    the customer consuming less metered features than
    included_units_during_trial.

    Expected outcome: the trial plan share and the trial consumption are
    fully offset by matching negative (discount) entries; only the
    post-trial plan share and post-trial consumption are charged.
    """
    billing_date = generate_docs_date('2015-03-01')

    metered_feature = MeteredFeatureFactory(
        included_units=Decimal('0.00'),
        included_units_during_trial=Decimal('5.00'))
    plan = PlanFactory.create(interval='month', interval_count=1,
                              generate_after=120, enabled=True,
                              trial_period_days=7, amount=Decimal('200.00'),
                              metered_features=[metered_feature])
    start_date = dt.date(2015, 2, 1)

    subscription = SubscriptionFactory.create(plan=plan, start_date=start_date)
    subscription.activate()
    subscription.save()

    # 3 units consumed during the trial, below the 5 included -> underflow.
    trial_quantity = Decimal('3.00')
    MeteredFeatureUnitsLogFactory(
        subscription=subscription, metered_feature=metered_feature,
        start_date=start_date, end_date=subscription.trial_end,
        consumed_units=trial_quantity)

    # Consumption after the trial is charged at the normal rate.
    mf_units_log_after_trial = MeteredFeatureUnitsLogFactory(
        subscription=subscription, metered_feature=metered_feature,
        start_date=subscription.trial_end + dt.timedelta(days=1),
        end_date=dt.datetime(2015, 2, 28)
    )

    subscription.cancel(when=Subscription.CANCEL_OPTIONS.END_OF_BILLING_CYCLE)
    subscription.cancel_date = dt.date(2015, 2, 28)
    subscription.save()

    call_command('generate_docs', billing_date=billing_date, stdout=self.output)

    # Expect one Proforma
    assert Proforma.objects.all().count() == 1
    assert Invoice.objects.all().count() == 0

    # In draft state
    assert Proforma.objects.all()[0].state == Proforma.STATES.DRAFT

    document_entries = DocumentEntry.objects.all()
    # Expect 6 entries:
    # Plan Trial (+-), Plan Trial Metered Feature (+-),
    # Plan After Trial (+), Metered Features After Trial (+)
    assert len(document_entries) == 6

    doc = document_entries[0]  # Plan trial (+): 7 of February's 28 days
    assert doc.unit_price == Decimal(7.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[1]  # Plan trial (-): the trial discount
    assert doc.unit_price == Decimal(-7.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[2]  # Consumed mf (+)
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == trial_quantity

    doc = document_entries[3]  # Consumed mf (-): fully discounted (underflow)
    assert doc.unit_price == - metered_feature.price_per_unit
    assert doc.quantity == trial_quantity

    doc = document_entries[4]  # Plan after trial end: remaining 21 of 28 days
    assert doc.unit_price == Decimal(21.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[5]  # Consumed mf after trial
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == mf_units_log_after_trial.consumed_units
def test_canceled_subscription_with_trial_and_trial_overflow(self):
    """A subscription canceled in the same month it started, with the
    customer consuming MORE units during the trial than
    included_units_during_trial.

    Expected outcome: only the included units are discounted; the
    overage (consumed - included) is charged as an extra entry, together
    with the post-trial plan share and post-trial consumption.
    """
    billing_date = generate_docs_date('2015-03-01')

    units_included_during_trial = Decimal('5.00')
    metered_feature = MeteredFeatureFactory(
        included_units=Decimal('0.00'),
        included_units_during_trial=units_included_during_trial)
    plan = PlanFactory.create(interval='month', interval_count=1,
                              generate_after=120, enabled=True,
                              trial_period_days=7, amount=Decimal('200.00'),
                              metered_features=[metered_feature])
    start_date = dt.date(2015, 2, 1)

    subscription = SubscriptionFactory.create(plan=plan, start_date=start_date)
    subscription.activate()
    subscription.save()

    # 7 units consumed during the trial > 5 included -> 2 units overage.
    units_consumed_during_trial = Decimal('7.00')
    MeteredFeatureUnitsLogFactory(
        subscription=subscription, metered_feature=metered_feature,
        start_date=start_date, end_date=subscription.trial_end,
        consumed_units=units_consumed_during_trial)

    # Consumption after the trial is charged at the normal rate.
    mf_units_log_after_trial = MeteredFeatureUnitsLogFactory(
        subscription=subscription, metered_feature=metered_feature,
        start_date=subscription.trial_end + dt.timedelta(days=1),
        end_date=dt.datetime(2015, 2, 28)
    )

    subscription.cancel(
        when=Subscription.CANCEL_OPTIONS.END_OF_BILLING_CYCLE
    )
    subscription.cancel_date = dt.date(2015, 2, 28)
    subscription.save()

    call_command('generate_docs', billing_date=billing_date,
                 stdout=self.output)

    # Expect one Proforma
    assert Proforma.objects.all().count() == 1
    assert Invoice.objects.all().count() == 0

    # In draft state
    assert Proforma.objects.all()[0].state == Proforma.STATES.DRAFT

    document_entries = DocumentEntry.objects.all()
    # Expect 7 entries:
    # Plan Trial (+-), Plan Trial Metered Feature (+-),
    # Extra consumed mf
    # Plan After Trial (+), Metered Features After Trial (+)
    assert len(document_entries) == 7

    doc = document_entries[0]  # Plan trial (+)
    assert doc.unit_price == Decimal(7.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[1]  # Plan trial (-)
    assert doc.unit_price == Decimal(-7.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[2]  # Consumed mf (+)
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == units_included_during_trial

    doc = document_entries[3]  # Consumed mf (-): only the included units
    assert doc.unit_price == - metered_feature.price_per_unit
    assert doc.quantity == units_included_during_trial

    doc = document_entries[4]  # Extra consumed mf (+): the overage
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == units_consumed_during_trial - units_included_during_trial

    doc = document_entries[5]  # Plan after trial end
    assert doc.unit_price == Decimal(21.0 / 28).quantize(Decimal('0.0000')) * plan.amount

    doc = document_entries[6]  # Consumed mf after trial
    assert doc.unit_price == metered_feature.price_per_unit
    assert doc.quantity == mf_units_log_after_trial.consumed_units
def test_gen_for_single_canceled_subscription_during_trial(self):
    """A subscription canceled during its trial yields one zero-total,
    fully prorated proforma once the billing date passes the cancel
    date, and only then transitions to the 'ended' state."""
    monthly_plan = PlanFactory.create(interval=Plan.INTERVALS.MONTH,
                                      interval_count=1, generate_after=120,
                                      enabled=True, trial_period_days=7,
                                      amount=Decimal('200.00'))
    sub = SubscriptionFactory.create(plan=monthly_plan,
                                     start_date=dt.date(2015, 1, 3))
    sub.activate()
    sub.cancel(when=Subscription.CANCEL_OPTIONS.NOW)
    sub.cancel_date = dt.date(2015, 1, 6)
    sub.save()

    # Generating on the cancel date itself must not end the subscription.
    call_command('generate_docs', date=generate_docs_date('2015-01-06'),
                 subscription=sub.pk, stdout=self.output)
    assert Subscription.objects.filter(state='ended').count() == 0

    # the date after the cancel date: now it ends and gets billed
    call_command('generate_docs', date=generate_docs_date('2015-01-07'),
                 subscription=sub.pk, stdout=self.output)
    assert Subscription.objects.filter(state='ended').count() == 1

    assert Proforma.objects.all().count() == 1
    assert Invoice.objects.all().count() == 0

    issued = Proforma.objects.all()[0]
    assert issued.proforma_entries.count() == 2
    for line in issued.proforma_entries.all():
        assert line.prorated
        assert line.start_date == sub.start_date
        assert line.end_date == sub.cancel_date
    assert issued.total == Decimal('0.0000')
def test_gen_active_and_canceled_selection(self):
    """Canceled subscriptions are billed (and ended) only once the
    billing date is strictly past their cancel date; still-active
    subscriptions are billed right away."""
    plan = PlanFactory.create(interval='month', interval_count=1,
                              generate_after=120, enabled=True,
                              trial_period_days=7, amount=Decimal('200.00'))
    start_date = dt.date(2015, 1, 29)

    SubscriptionFactory.create_batch(size=5, plan=plan, start_date=start_date)
    for sub in Subscription.objects.all():
        sub.activate()
        sub.save()

    # Cancel the last three of the five subscriptions on the start date.
    cancel_date = dt.date(2015, 1, 29)
    for sub in Subscription.objects.all()[2:5]:
        sub.cancel(when=Subscription.CANCEL_OPTIONS.NOW)
        sub.cancel_date = cancel_date
        sub.save()

    call_command('generate_docs', billing_date=cancel_date, stdout=self.output)

    # On the cancel date only the two active subscriptions are billed.
    assert Proforma.objects.all().count() == 2
    assert Subscription.objects.filter(state='ended').count() == 0

    call_command('generate_docs', billing_date=cancel_date + ONE_DAY,
                 stdout=self.output)

    # One day later the three canceled ones are billed and ended too:
    # 5 Proformas total (2 active subs, 3 canceled).
    assert Proforma.objects.all().count() == 5
    assert Invoice.objects.all().count() == 0
    assert Subscription.objects.filter(state='ended').count() == 3
def test_subscription_with_separate_cycles_during_trial(self):
    """With separate_cycles_during_trial=True and prebill_plan=False, a
    trial spanning a month boundary (2015-01-25 .. 2015-02-08) is billed
    as two separate cycles, each in arrears, and no document is issued
    before a cycle ends (generate_documents_on_trial_end=False).
    """
    separate_cycles_during_trial = True
    prebill_plan = False
    generate_documents_on_trial_end = False

    metered_feature = MeteredFeatureFactory(
        included_units_during_trial=Decimal('5.00'),
        price_per_unit=Decimal('1.00')
    )
    plan = PlanFactory.create(interval=Plan.INTERVALS.MONTH,
                              interval_count=1, generate_after=120,
                              enabled=True, trial_period_days=15,
                              amount=Decimal('200.00'),
                              separate_cycles_during_trial=separate_cycles_during_trial,
                              generate_documents_on_trial_end=generate_documents_on_trial_end,
                              prebill_plan=prebill_plan,
                              metered_features=[metered_feature])

    subscription = SubscriptionFactory.create(plan=plan, start_date=dt.date(2015, 1, 25))
    subscription.activate()
    subscription.save()

    # Disable sales tax so the asserted totals stay simple.
    subscription.customer.sales_tax_percent = None
    subscription.customer.save()

    # Exactly the included trial units are consumed during January.
    mf_log = MeteredFeatureUnitsLogFactory.create(
        subscription=subscription, metered_feature=metered_feature,
        start_date=subscription.start_date, end_date=dt.date(2015, 1, 31),
        consumed_units=Decimal('5.00')
    )

    # No prebilling and trial still running: nothing on the start date.
    call_command('generate_docs', date=generate_docs_date('2015-01-25'), stdout=self.output)

    assert Proforma.objects.all().count() == 0

    # First cycle (January part of the trial) is billed on Feb 1st.
    call_command('generate_docs', date=generate_docs_date('2015-02-01'), stdout=self.output)

    assert Proforma.objects.all().count() == 1

    proforma = Proforma.objects.all()[0]
    # Trial (+) and discount (-) entries cancel out to a zero total.
    assert proforma.total == Decimal('0.00')
    assert proforma.proforma_entries.count() == 4  # plan trial and consumed mfs
    for entry in proforma.proforma_entries.all():
        if entry.product_code == plan.product_code:
            # 7 of January's 31 days fall inside this cycle.
            unit_price = Decimal(7 / 31.0).quantize(Decimal('0.0000')) * plan.amount
            assert entry.quantity == 1
        else:
            assert entry.quantity == mf_log.consumed_units
            unit_price = metered_feature.price_per_unit

        if entry.unit_price < 0:  # discount
            unit_price *= -1

        assert entry.unit_price == unit_price
        assert entry.prorated
        assert entry.start_date == subscription.start_date
        assert entry.end_date == dt.date(2015, 1, 31)

    # Second cycle: trial remainder (Feb 1-8) plus the paid Feb 9-28
    # stretch, billed on March 1st.
    call_command('generate_docs', date=generate_docs_date('2015-03-01'),
                 subscription=subscription.pk, stdout=self.output)

    assert Proforma.objects.all().count() == 2

    proforma = Proforma.objects.all()[1]
    # Only the 20 post-trial days (Feb 9-28) are actually charged.
    billed_plan_amount = Decimal(20 / 28.0).quantize(Decimal('0.0000')) * plan.amount
    assert proforma.total == billed_plan_amount
    assert proforma.proforma_entries.count() == 4  # plan trial (+-), plan (+) and mfs (0)
    for entry in proforma.proforma_entries.all():
        if entry.product_code == plan.product_code:
            assert entry.quantity == 1
            if entry.start_date == dt.date(2015, 2, 1):  # trial
                unit_price = plan.amount - billed_plan_amount
                assert entry.end_date == dt.date(2015, 2, 8)
            else:
                assert entry.start_date == dt.date(2015, 2, 9)
                assert entry.end_date == dt.date(2015, 2, 28)
                unit_price = billed_plan_amount
        else:
            # No consumption was logged after the trial.
            assert entry.quantity == Decimal('0.00')
            assert entry.start_date == subscription.trial_end + ONE_DAY
            assert entry.end_date == dt.date(2015, 2, 28)
            unit_price = entry.unit_price

        if entry.unit_price < 0:  # discount
            unit_price *= -1

        assert entry.unit_price == unit_price
        assert entry.prorated

    # Re-running inside an already billed period adds no documents.
    call_command('generate_docs', date=generate_docs_date('2015-02-10'),
                 subscription=subscription.pk, stdout=self.output)

    assert Proforma.objects.all().count() == 2
def test_subscription_with_documents_generation_on_trial_end(self):
separate_cycles_during_trial = False
generate_documents_on_trial_end = True
metered_feature = MeteredFeatureFactory(
included_units_during_trial=Decimal('5.00'),
price_per_unit=Decimal('1.00')
)
plan = PlanFactory.create(interval=Plan.INTERVALS.MONTH,
interval_count=1, generate_after=120,
enabled=True, trial_period_days=15,
amount=Decimal('200.00'),
separate_cycles_during_trial=separate_cycles_during_trial,
generate_documents_on_trial_end=generate_documents_on_trial_end,
metered_features=[metered_feature])
subscription = SubscriptionFactory.create(plan=plan, start_date=dt.date(2015, 1, 25))
subscription.activate()
subscription.save()
subscription.customer.sales_tax_percent = None
subscription.customer.save()
mf_log = MeteredFeatureUnitsLogFactory.create(
subscription=subscription, metered_feature=metered_feature,
start_date=subscription.start_date, end_date=dt.date(2015, 1, 31),
consumed_units=Decimal('5.00')
)
call_command('generate_docs', date=generate_docs_date('2015-01-25'), stdout=self.output)
assert Proforma.objects.all().count() == 1
proforma = Proforma.objects.all()[0]
assert proforma.total == Decimal('0.00')
assert proforma.proforma_entries.count() == 4 # plan trial (+-) split by months (*2)
for entry in proforma.proforma_entries.all():
if entry.start_date == subscription.start_date:
assert entry.end_date == dt.date(2015, 1, 31)
unit_price = Decimal(7 / 31.0).quantize(Decimal('0.0000')) * plan.amount
else:
unit_price = Decimal(8 / 28.0).quantize(Decimal('0.0000')) * plan.amount
assert entry.start_date == dt.date(2015, 2, 1)
assert entry.end_date == subscription.trial_end
if entry.unit_price < 0: # discount
unit_price *= -1
assert entry.quantity == 1
assert entry.unit_price == unit_price
assert entry.prorated
call_command('generate_docs', date=generate_docs_date('2015-02-01'), stdout=self.output)
assert Proforma.objects.all().count() == 1
call_command('generate_docs', date=generate_docs_date('2015-02-09'), stdout=self.output)
proforma = Proforma.objects.all()[1]
plan_amount = Decimal(20 / 28.0).quantize(Decimal('0.0000')) * plan.amount
assert proforma.proforma_entries.count() == 3 # mfs during trial (+-) and remaining plan
for entry in proforma.proforma_entries.all():
if entry.product_code == plan.product_code:
assert entry.quantity == 1
unit_price = plan_amount
else:
assert entry.quantity == | |
+= 'x'
if (len(perms) > 0):
self.write_attribute(PERMISSIONS, perms)
has_contents = (self.options.MemoryContent.checked == True and
self.check_if_seg_contents(seg) == True)
self.close_tag(has_contents)
if (has_contents == True):
self.export_memory_contents(os.path.basename(binfilename),
self.binfile, seg.start_ea, seg.end_ea)
self.end_element(MEMORY_SECTION)
def export_program(self):
    """
    Exports basic information about the program as the PROGRAM,
    INFO_SOURCE, PROCESSOR, and COMPILER elements.

    Returns:
        True on success, False when the compiler detected by IDA has no
        Ghidra equivalent (in which case the export is aborted).
    """
    # output the PROGRAM element
    self.update_status(PROGRAM)
    # NOTE(review): time.clock() was removed in Python 3.8; kept here for
    # consistency with display_cpu_time() elsewhere in this class.
    timer = time.clock()
    self.start_element(PROGRAM)
    self.write_attribute(NAME, idc.get_root_filename())
    self.write_attribute(EXE_PATH, idc.get_input_file_path())
    etype = ida_loader.get_file_type_name()
    if (len(etype) > 0):
        self.write_attribute(EXE_FORMAT, etype)
    # check for presence of INPUT_MD5 netnode
    md5 = ida_netnode.netnode(INPUT_MD5)
    if md5 == BADNODE:
        # no cached netnode value: recompute from the input file
        input_md5 = idc.retrieve_input_file_md5()
    else:
        input_md5 = md5.supval(ida_nalt.RIDX_MD5)
    if input_md5 != None:
        self.write_attribute(INPUT_MD5, input_md5)
    self.close_tag(True)

    # output the INFO_SOURCE element (tool/user/file/timestamp provenance)
    self.start_element(INFO_SOURCE)
    tool = 'IDA-Pro ' + ida_kernwin.get_kernel_version()
    tool += ' XML plugin v' + IDAXML_VERSION + \
            ' (Python) SDK ' + str(IDA_SDK_VERSION)
    self.write_attribute(TOOL, tool)
    # USERNAME on Windows, USER on Unix-like systems
    user = os.getenv("USERNAME", "UNKNOWN")
    if (user == "UNKNOWN"):
        user = os.getenv("USER", "UNKNOWN")
    self.write_attribute(USER, user)
    self.write_attribute(FILE, idc.get_idb_path())
    ts = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    self.write_attribute(TIMESTAMP, ts)
    self.close_tag()

    # output the PROCESSOR element
    self.start_element(PROCESSOR)
    self.write_attribute(NAME, self.inf.procname)
    if self.inf.is_be() == True:
        byte_order = "big"
    else:
        byte_order = "little"
    self.write_attribute(ENDIAN, byte_order)
    self.seg_addr = False
    bitness = 1
    model_warning = False
    nsegs = ida_segment.get_segm_qty()
    if (nsegs > 0):
        # use the widest bitness found across all segments; warn below
        # if segments disagree
        bitness = ida_segment.getnseg(0).bitness
        for i in range(1, nsegs):
            seg = ida_segment.getnseg(i)
            if (seg.bitness != bitness):
                model_warning = True
                if (seg.bitness > bitness):
                    bitness = seg.bitness
    # IDA bitness codes: 0 -> 16-bit, 1 -> 32-bit, 2 -> 64-bit
    addr_model = "32-bit"
    if (bitness == 0):
        addr_model = "16-bit"
    elif (bitness == 2):
        addr_model = "64-bit"
    self.write_attribute(ADDRESS_MODEL, addr_model)
    self.close_tag()
    if (model_warning):
        idc.msg("WARNING: Segments do not have same " +
                "addressing model!\n")
    # 16-bit x86: emit segmented (seg:off) addresses later on
    if (ida_idp.ph.id == ida_idp.PLFM_386 and bitness == 0):
        self.seg_addr = True
    # find any overlayed memory before processing addressable items
    self.find_overlay_memory()

    # output compiler info
    self.start_element(COMPILER)
    # AM - Added to correctly convert the compiler name from IDA
    # to Ghidra definitions. For x86 binaries, look at x86.ldefs
    # specifications.
    compiler_name = ida_typeinf.get_compiler_name(self.inf.cc.id)
    print("GhIDA:: [DEBUG] compiler name: %s" % compiler_name)
    new_compiler_name = ''
    if addr_model == '16-bit':
        new_compiler_name = 'default'
    elif addr_model == '32-bit':
        # Visual C++ is handled as "Visual Studio" by Ghidra
        # Delphi is supported by Ghidra
        # Borland C++ is supported by Ghidra
        if compiler_name == "Visual C++" or \
                compiler_name == "Delphi" or \
                compiler_name == "Borland C++":
            new_compiler_name = compiler_name
        # Rename GNU C++ into gcc
        elif compiler_name == "GNU C++":
            new_compiler_name = "gcc"
        # Watcom C++ is not supported by Ghidra
        elif compiler_name == "Watcom C++":
            print("GhIDA [!] Watcom C++ compiler not supported by Ghidra")
            return False
        # Visual Age C++ is not supported by Ghidra
        elif compiler_name == "Visual Age C++":
            print(
                "GhIDA [!] Visual Age C++ compiler not supported by Ghidra")
            return False
        # Generic compiler are not supported by Ghidra
        elif compiler_name == 'Unknown':
            print("GhIDA [!] unknown compiler not supported by Ghidra")
            return False
        else:
            print("GhIDA [!] compiler not supported by Ghidra")
            return False
    elif addr_model == '64-bit':
        # Visual C++ is handled as "Visual Studio" by Ghidra
        if compiler_name == "Visual C++":
            new_compiler_name = compiler_name
        # Rename GNU C++ into gcc
        elif compiler_name == "GNU C++":
            new_compiler_name = "gcc"
        else:
            print("GhIDA [!] unknown compiler not supported by Ghidra")
            return False
    print("GhIDA:: [DEBUG] new_compiler_name: %s" % new_compiler_name)
    # self.write_attribute(
    #     NAME, ida_typeinf.get_compiler_name(self.inf.cc.id))
    self.write_attribute(NAME, new_compiler_name)
    self.close_tag()
    self.display_cpu_time(timer)
    return True
def export_program_entry_points(self):
    """Export the program's entry points as PROGRAM_ENTRY_POINT
    elements wrapped in a PROGRAM_ENTRY_POINTS element.

    Emits nothing when the database defines no entry points.
    """
    entry_count = idc.get_entry_qty()
    if entry_count == 0:
        return
    self.update_status(PROGRAM_ENTRY_POINTS)
    timer = time.clock()
    self.start_element(PROGRAM_ENTRY_POINTS, True)
    for index in range(entry_count):
        ordinal = idc.get_entry_ordinal(index)
        entry_addr = idc.get_entry(ordinal)
        self.start_element(PROGRAM_ENTRY_POINT)
        self.write_address_attribute(ADDRESS, entry_addr)
        self.close_tag()
    self.end_element(PROGRAM_ENTRY_POINTS)
    self.display_cpu_time(timer)
def export_register_values(self):
    """
    Exports segment register value ranges as REGISTER_VALUE_RANGE
    elements inside a REGISTER_VALUES element.

    Emits nothing when no register has any defined ranges.
    """
    first = ida_idp.ph_get_reg_first_sreg()
    last = ida_idp.ph_get_reg_last_sreg() + 1
    # quick pre-scan: bail out early if no register has any ranges
    has_segregareas = False
    for j in range(first, last):
        nsegregareas = ida_segregs.get_sreg_ranges_qty(j)
        if nsegregareas != 0:
            has_segregareas = True
            break
    if has_segregareas == False:
        return
    self.update_status(REGISTER_VALUES)
    timer = time.clock()
    self.start_element(REGISTER_VALUES, True)
    sr = ida_segregs.sreg_range_t()
    for j in range(first, last):
        nsegregareas = ida_segregs.get_sreg_ranges_qty(j)
        if nsegregareas == 0:
            continue
        for i in range(nsegregareas):
            success = ida_segregs.getn_sreg_range(sr, j, i)
            if success == False:
                continue
            value = sr.val
            # skip undefined selector values
            if value == idc.BADSEL:
                continue
            regname = ida_idp.ph.regnames[j]
            if regname == None:
                continue
            # "cs" (and "ds" on TMS processors) are deliberately excluded
            if regname.lower() == "cs":
                continue
            if (ida_idp.ph.id == ida_idp.PLFM_TMS and
                    regname.lower() == "ds"):
                continue
            self.start_element(REGISTER_VALUE_RANGE)
            self.write_attribute(REGISTER, ida_idp.ph.regnames[j])
            self.write_numeric_attribute(VALUE, value)
            self.write_address_attribute(START_ADDRESS, sr.start_ea)
            # range length in bytes (cbsize scales addressable units)
            length = (sr.end_ea - sr.start_ea) * self.cbsize
            self.write_numeric_attribute(LENGTH, length)
            self.close_tag()
    self.end_element(REGISTER_VALUES)
    self.display_cpu_time(timer)
def export_regular_cmt(self, cmt):
    """Emit a REGULAR_CMT element wrapping the given comment text.

    Args:
        cmt: String containing the regular comment.
    """
    self.write_comment_element(REGULAR_CMT, cmt)
def export_repeatable_cmt(self, cmt):
    """Emit a REPEATABLE_CMT element wrapping the given comment text.

    Args:
        cmt: String containing the repeatable comment.
    """
    self.write_comment_element(REPEATABLE_CMT, cmt)
def export_stack_frame(self, function):
    """Export a function's stack frame as a STACK_FRAME element,
    including the stack variables allocated in the frame.

    Args:
        function: IDA function instance.
    """
    frame = ida_struct.get_struc(function.frame)
    if frame is None or frame.memqty <= 0:
        return
    self.start_element(STACK_FRAME)
    self.write_numeric_attribute(LOCAL_VAR_SIZE, function.frsize)
    self.write_numeric_attribute(REGISTER_SAVE_SIZE, function.frregs)
    self.write_numeric_attribute(RETURN_ADDR_SIZE,
                                 ida_frame.get_frame_retsize(function))
    self.write_numeric_attribute(BYTES_PURGED, function.argsize)
    has_vars = self.check_stack_frame(frame)
    self.close_tag(has_vars)
    if has_vars:
        self.export_stack_vars(function, frame)
    self.end_element(STACK_FRAME)
def export_stack_reference(self, addr):
    """
    Exports references to stack variables at the address as
    STACK_REFERENCE elements.

    Args:
        addr: Integer containing instruction address.
    """
    f = idc.get_full_flags(addr)
    # check every operand slot of the instruction for a stack variable
    for op in range(ida_ida.UA_MAXOP):
        if idc.is_code(f) == True and ida_bytes.is_stkvar(f, op) == True:
            insn = ida_ua.insn_t()
            ida_ua.decode_insn(insn, addr)
            opnd = insn.ops[op]
            # TODO:How to handle opnd.type for stack references
            optype = opnd.type
            if optype == idc.o_void:
                continue
            # TODO:How to handle op_t_get_addr for stack references
            SV = ida_frame.get_stkvar(insn, opnd, opnd.value)
            if SV == None:
                continue
            (sv, actval) = SV
            function = ida_funcs.get_func(addr)
            self.start_element(STACK_REFERENCE)
            self.write_address_attribute(ADDRESS, addr)
            self.write_numeric_attribute(OPERAND_INDEX, op, 10)
            offset = opnd.addr
            spoff = offset - function.frregs
            # sign-extend 32-bit offsets into negative Python ints
            if offset > 0x7FFFFFFF:
                offset -= 0x100000000
            if spoff > 0x7FFFFFFF:
                spoff -= 0x100000000
            self.write_numeric_attribute(STACK_PTR_OFFSET, spoff,
                                         16, True)
            # frame-pointer offset only applies to FUNC_FRAME functions
            if (function.flags & idc.FUNC_FRAME) != 0:
                self.write_numeric_attribute(FRAME_PTR_OFFSET,
                                             offset, 16, True)
            self.close_tag()
def export_stack_vars(self, function, sframe):
    """
    Exports the stack variables (parameters and locals) in a stack frame
    as STACK_VAR elements.

    Args:
        function: IDA function instance.
        sframe: IDA stack frame instance.
    """
    for i in range(sframe.memqty):
        member = sframe.get_member(i)
        if member is None:
            continue
        mname = ida_struct.get_member_name(member.id)
        # BUG FIX: was `len(mname) < 0`, which can never be true; the
        # intent is to skip members with an empty name.
        if mname is None or len(mname) == 0:
            continue
        # " s" and " r" are IDA's special frame members (saved regs /
        # return address), not real stack variables.
        if mname == " s" or mname == " r":
            continue
        spoff = member.soff - function.frsize - function.frregs
        froff = member.soff - function.frsize
        self.start_element(STACK_VAR)
        self.write_numeric_attribute(STACK_PTR_OFFSET, spoff, 16, True)
        if function.flags & idc.FUNC_FRAME != 0:
            self.write_numeric_attribute(FRAME_PTR_OFFSET, froff, 16, True)
        # only export NAME for user-assigned names; auto-generated
        # "var_"/"arg_" names are omitted
        pre = mname[0:4]
        if pre != "var_" and pre != "arg_":
            self.write_attribute(NAME, mname)
        f = member.flag
        size = ida_struct.get_member_size(member)
        mtype = self.get_member_type(member)
        msize = size
        if idc.is_struct(f) == True:
            msize = idc.get_struc_size(ida_struct.get_struc_id(mtype))
        elif idc.is_strlit(f) == False:
            mtibuf = ida_nalt.opinfo_t()
            mti = ida_struct.retrieve_member_info(mtibuf, member)
            # TODO: How to handle get_data_type_size (for stack vars)
            #msize = idaapi.get_data_type_size(f, mtibuf)
        if size < msize:
            size = msize
        # Render arrays as "type[count]". Guard msize > 0 to avoid a
        # divide-by-zero on zero-sized element types; use integer
        # division so the count is not a float under Python 3.
        if (idc.is_strlit(f) == False and ida_bytes.is_align(f) == False
                and msize > 0 and size != msize):
            mtype = "%s[%d]" % (mtype, size // msize)
        self.write_attribute(DATATYPE, mtype)
        self.write_numeric_attribute(SIZE, size * self.cbsize)
        regcmt = ida_struct.get_member_cmt(member.id, False)
        rptcmt = ida_struct.get_member_cmt(member.id, True)
        if regcmt != None:
            regcmt = ida_lines.tag_remove(regcmt + " ", 0)
        if rptcmt != None:
            # BUG FIX: the stripped result was assigned to a misspelled
            # variable (`rptrcmt`), so repeatable comments were exported
            # with IDA color tags still embedded.
            rptcmt = ida_lines.tag_remove(rptcmt + " ", 0)
        has_regcmt = regcmt is not None and len(regcmt) > 0
        has_rptcmt = rptcmt is not None and len(rptcmt) > 0
        has_content = has_regcmt or has_rptcmt
        self.close_tag(has_content)
        if has_content:
            if has_regcmt:
                self.export_regular_cmt(regcmt)
            if has_rptcmt:
                self.export_repeatable_cmt(rptcmt)
        self.end_element(STACK_VAR)
def export_structures(self):
    """
    Exports information about all structures and unions as STRUCTURE or
    UNION elements, including their comments and members.
    """
    structs = idautils.Structs()
    for struct in structs:
        (idx, sid, sname) = struct
        s = ida_struct.get_struc(sid)
        # unions and structures share the same export shape, only the
        # element tag differs
        stype = STRUCTURE
        if s.is_union() == True:
            stype = UNION
        self.start_element(stype)
        self.write_attribute(NAME, sname)
        # size in bytes (cbsize scales addressable units to bytes)
        size = idc.get_struc_size(sid) * self.cbsize
        self.write_numeric_attribute(SIZE, size)
        if s.is_varstr() == True:
            # variable-length structure
            self.write_attribute(VARIABLE_LENGTH, "y")
        regcmt = idc.get_struc_cmt(sid, False)
        rptcmt = idc.get_struc_cmt(sid, True)
        # child elements exist if any comment is set or members exist
        has_contents = regcmt != None or rptcmt != None or s.memqty > 0
        self.close_tag(has_contents)
        if (has_contents):
            if regcmt != None:
                self.export_regular_cmt(regcmt)
            if rptcmt != None:
                self.export_repeatable_cmt(rptcmt)
            if s.memqty > 0:
                self.export_members(s)
        self.end_element(stype)
def export_symbol(self, addr, name, stype=""):
"""
Exports name for an address as a SYMBOL element. If the name is a
demangled name, add the mangled name as the MANGLED attribute.
| |
<filename>email_reply_parser/__init__.py
"""
email_reply_parser is a python library port of GitHub's Email Reply Parser.
For more information, visit https://github.com/zapier/email-reply-parser
"""
import re
class EmailReplyParser(object):
    """Static facade over EmailMessage for parsing email bodies."""

    @staticmethod
    def read(text):
        """Parse an email body into labeled fragments.

        text - A string email body

        Returns the parsed EmailMessage instance.
        """
        message = EmailMessage(text)
        return message.read()

    @staticmethod
    def parse_reply(text):
        """Return only the visible reply portion of an email body.

        text - A string email body
        """
        return EmailReplyParser.read(text).reply
class EmailMessage(object):
    """ An email message represents a parsed email body.

    The body is scanned bottom-up, line by line, and partitioned into
    Fragment objects labeled as quoted text, quote headers, signatures,
    or visible reply content.
    """

    # Signature markers: "--", "__", "-<word>", or "Sent from my <device>".
    SIG_REGEX = re.compile(r'(--|__|-\w)|(^Sent from my (\w+\s*){1,3})')
    # Single-line quote header, e.g. "On <date>, <author> wrote:".
    QUOTE_HDR_REGEX = re.compile('On.*wrote:$')
    QUOTED_REGEX = re.compile(r'(>+)')
    HEADER_REGEX = re.compile(r'^\*?(From|Sent|To|Subject):\*? .+')
    # Quote header wrapped over several lines; the negative lookahead
    # anchors the match at the LAST "On ... wrote:" occurrence.
    _MULTI_QUOTE_HDR_REGEX = r'(?!On.*On\s.+?wrote:)(On\s(.+?)wrote:)'
    MULTI_QUOTE_HDR_REGEX = re.compile(_MULTI_QUOTE_HDR_REGEX, re.DOTALL | re.MULTILINE)
    MULTI_QUOTE_HDR_REGEX_MULTILINE = re.compile(_MULTI_QUOTE_HDR_REGEX, re.DOTALL)

    def __init__(self, text):
        """text - A string email body; CRLF line endings are normalized."""
        self.fragments = []    # finished Fragment objects, reply order
        self.fragment = None   # Fragment currently being accumulated
        self.text = text.replace('\r\n', '\n')
        self.found_visible = False

    def read(self):
        """ Creates new fragment for each line
        and labels as a signature, quote, or hidden.

        Returns EmailMessage instance
        """
        self.found_visible = False

        is_multi_quote_header = self.MULTI_QUOTE_HDR_REGEX_MULTILINE.search(self.text)
        if is_multi_quote_header:
            # Collapse a multi-line quote header onto one line so the
            # line-based scan below can recognize it.
            self.text = self.MULTI_QUOTE_HDR_REGEX.sub(
                is_multi_quote_header.groups()[0].replace('\n', ''), self.text)

        # Fix any outlook style replies, with the reply immediately above
        # the signature boundary line. See email_2_2.txt for an example.
        # BUG FIX: re.MULTILINE was previously passed as re.sub()'s
        # positional `count` argument (silently limiting the substitution
        # to 8 occurrences); flags must be passed via the keyword.
        self.text = re.sub('([^\n])(?=\n ?[_-]{7,})', '\\1\n', self.text,
                           flags=re.MULTILINE)

        self.lines = self.text.split('\n')
        # Scan bottom-up: fragments are detected from the end of the mail.
        self.lines.reverse()

        for line in self.lines:
            self._scan_line(line)

        self._finish_fragment()
        self.fragments.reverse()

        return self

    @property
    def reply(self):
        """ Captures reply message within email
        """
        reply = []
        for f in self.fragments:
            if not (f.hidden or f.quoted):
                reply.append(f.content)
        return '\n'.join(reply)

    def _scan_line(self, line):
        """ Reviews each line in email message and determines fragment type

        line - a row of text from an email message
        """
        is_quote_header = self.QUOTE_HDR_REGEX.match(line) is not None
        is_quoted = self.QUOTED_REGEX.match(line) is not None
        is_header = is_quote_header or self.HEADER_REGEX.match(line) is not None

        if self.fragment and len(line.strip()) == 0:
            # Since scanning is bottom-up, a blank line "below" a line
            # matching the signature regex closes a signature fragment.
            if self.SIG_REGEX.match(self.fragment.lines[-1].strip()):
                self.fragment.signature = True
                self._finish_fragment()

        if self.fragment \
                and ((self.fragment.headers == is_header and self.fragment.quoted == is_quoted) or
                     (self.fragment.quoted and (is_quote_header or len(line.strip()) == 0))):
            self.fragment.lines.append(line)
        else:
            self._finish_fragment()
            self.fragment = Fragment(is_quoted, line, headers=is_header)

    def quote_header(self, line):
        """ Determines whether line is a quote header ("On ... wrote:")

        line - a row of the email message

        Returns True or False
        """
        # BUG FIX: the line was previously reversed (line[::-1]) before
        # matching — a leftover from the upstream port whose regex
        # matched reversed text. QUOTE_HDR_REGEX here matches forward
        # text (see _scan_line), so match the line as-is.
        return self.QUOTE_HDR_REGEX.match(line) is not None

    def _finish_fragment(self):
        """ Creates fragment
        """
        if self.fragment:
            self.fragment.finish()
            if self.fragment.headers:
                # Regardless of what's been seen to this point, if we encounter a headers fragment,
                # all the previous fragments should be marked hidden and found_visible set to False.
                self.found_visible = False
                for f in self.fragments:
                    f.hidden = True
            if not self.found_visible:
                if self.fragment.quoted \
                        or self.fragment.headers \
                        or self.fragment.signature \
                        or (len(self.fragment.content.strip()) == 0):
                    self.fragment.hidden = True
                else:
                    self.found_visible = True
            self.fragments.append(self.fragment)
        self.fragment = None
class Fragment(object):
    """One labeled piece of an email message (reply, quote, signature,
    headers).

    Lines are accumulated in reverse while the message is scanned
    bottom-up; finish() restores their original order and freezes the
    fragment's text.
    """

    def __init__(self, quoted, first_line, headers=False):
        self.quoted = quoted
        self.headers = headers
        self.signature = False
        self.hidden = False
        self._content = None
        self.lines = [first_line]

    def finish(self):
        """Restore original line order and join the lines into the
        fragment's immutable content."""
        self.lines.reverse()
        self._content = '\n'.join(self.lines)
        self.lines = None

    @property
    def content(self):
        """The fragment's text with surrounding whitespace stripped."""
        return self._content.strip()
# Colour constants for terminal output.
bold=`tput bold`
green=`tput setaf 2`
red=`tput setaf 1`
reset=`tput sgr0`

# Ports and log files for the two test enclaves.
ALICE_PORT=10001
BOB_PORT=10002
ALICE_LOG=bin/testnet/test/alice.txt
BOB_LOG=bin/testnet/test/bob.txt

if test -d bin; then cd bin; fi

echo "${bold}Mounting a RAM disk for server output in test directory!${reset}"
if mountpoint -q -- "test"; then
    sudo umount test
fi

# FIX: use `|| true` (suppress the exit status) instead of `| true`
# (a pipe) — the intent is to ignore the error when the directory does
# not exist yet, e.g. on the first run.
rm -r test || true
mkdir test && sudo mount -t tmpfs -o size=5000m tmpfs test

# Source Intel Libraries
source /opt/intel/sgxsdk/environment

pushd ../../ # go to source directory
echo "${bold}Starting two ghost teechain enclaves...${reset}"

echo "${bold}Spawning enclave ALICE listening on port $ALICE_PORT in $ALICE_LOG ${reset}"
./teechain ghost -d -p $ALICE_PORT > $ALICE_LOG 2>&1 &
sleep 1

echo "${bold}Spawning enclave BOB listening on port $BOB_PORT in $BOB_LOG ${reset}"
./teechain ghost -d -p $BOB_PORT > $BOB_LOG 2>&1 &
sleep 1

# Block until both enclaves report they are up.
echo -n "${red}Waiting until enclaves are initialized ...!${reset}"
for u in alice bob; do #TODO: generalize to multiple parties (currently just alice and bob)
    while [ "$(grep -a 'Enclave created' bin/testnet/test/${u}.txt | wc -l)" -eq 0 ]; do
        sleep 0.1
        echo -n "."
    done
done

# Create primaries
./teechain primary -p $ALICE_PORT
./teechain primary -p $BOB_PORT

# Setup up primaries with number of deposits
./teechain setup_deposits 5 -p $ALICE_PORT
./teechain setup_deposits 3 -p $BOB_PORT

# Deposits made
./teechain deposits_made mmY6ijr6uLP3DdRFC4nwL23HSKsH2xgy74 1 5 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feA 0 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feA 1 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feA 2 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feA 3 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feA 4 1 -p $ALICE_PORT
./teechain deposits_made my6NJU1T6gL5f3TfmSPN4idUytdCQHTmsU 1 3 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feB 0 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feB 1 1 edec34c9bb3a4395cd8d1e9300725f537235d8a058fc6a7ae519003b64fd0feB 2 1 -p $BOB_PORT

# Create and establish a channel between Alice and Bob
./teechain create_channel -p $BOB_PORT &
sleep 1
./teechain create_channel -i -r 127.0.0.1:$BOB_PORT -p $ALICE_PORT # Initiator
sleep 2

# Extract the channel id for the channel created
CHANNEL_1=$(grep "Channel ID:" $ALICE_LOG | awk '{print $3}')

# Verified the setup transactions are in the blockchain
./teechain verify_deposits $CHANNEL_1 -p $BOB_PORT &
./teechain verify_deposits $CHANNEL_1 -p $ALICE_PORT
sleep 2

# Alice and Bob add deposits to their channels now
./teechain add_deposit $CHANNEL_1 0 -p $ALICE_PORT
./teechain add_deposit $CHANNEL_1 0 -p $BOB_PORT

# Alice check balance matches expected
./teechain balance $CHANNEL_1 -p $ALICE_PORT
if ! tail -n 2 $ALICE_LOG | grep -q "My balance is: 1, remote balance is: 1"; then
    echo "Alice's balance check failed on channel setup!"; exit 1;
fi

# Send from Bob to Alice
./teechain send $CHANNEL_1 1 -p $BOB_PORT

# Alice check balance after
./teechain balance $CHANNEL_1 -p $ALICE_PORT
if ! tail -n 2 $ALICE_LOG | grep -q "My balance is: 2, remote balance is: 0"; then
    echo "Alice's balance check failed after send!"; exit 1;
fi

# Send from Bob to Alice should fail. Bob check balance, shouldn't have changed
./teechain send $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 2"; then
    echo "Bob's balance check failed!"; exit 1;
fi

# Add deposit from bob's side and check balance
./teechain add_deposit $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 1, remote balance is: 2"; then
    echo "Bob's balance check failed!"; exit 1;
fi
echo "Bob added another deposit to his channel!"

# Send from Bob to Alice and check balance is back to zero
./teechain send $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 3"; then
    echo "Bob's balance check failed!"; exit 1;
fi

# Send from Alice to Bob and check Bob's balance on Alice's side
./teechain send $CHANNEL_1 1 -p $ALICE_PORT
./teechain balance $CHANNEL_1 -p $ALICE_PORT
if ! tail -n 2 $ALICE_LOG | grep -q "My balance is: 2, remote balance is: 1"; then
    echo "Alice's balance check failed!"; exit 1;
fi

# Bob remove deposit and check balance
./teechain remove_deposit $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 2"; then
    echo "Bob's balance check failed!"; exit 1;
fi
echo "Bob removed the deposit from his channel!"

# Bob try to remove first deposit, should fail as insufficient funds
./teechain remove_deposit $CHANNEL_1 0 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 2"; then
    echo "Bob's balance check failed!"; exit 1;
fi
echo "Bob removed his last deposit from the channel!"

# Bob now send 1 to alice
./teechain send $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 1"; then
    echo "Bob's balance check failed!"; exit 1;
fi
echo "Bob sent 1 to Alice!"

# Alice removed last deposit from channel
./teechain remove_deposit $CHANNEL_1 0 -p $ALICE_PORT
./teechain balance $CHANNEL_1 -p $ALICE_PORT
if ! tail -n 2 $ALICE_LOG | grep -q "My balance is: 0, remote balance is: 0"; then
    echo "Alice's balance check failed!"; exit 1;
fi
echo "Alice removed her last deposit from the channel!"

# Bob now send 1 to alice
./teechain send $CHANNEL_1 1 -p $BOB_PORT
./teechain balance $CHANNEL_1 -p $BOB_PORT
if ! tail -n 2 $BOB_LOG | grep -q "My balance is: 0, remote balance is: 0"; then
    echo "Bob's balance check failed!"; exit 1;
fi
echo "Bob tried to send 1 to alice, but it didnt work!"

# Add all the
# -*- coding: utf-8 -*-
"""Total generation plots.
This module plots figures of total generation for a year, month etc.
@author: <NAME>
"""
import logging
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.patches import Patch
import matplotlib.ticker as mtick
import marmot.config.mconfig as mconfig
import marmot.plottingmodules.plotutils.plot_library as plotlib
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData,
MissingZoneData)
# Red translucent legend patch used to flag unserved energy on the
# total-generation bar plots.
custom_legend_elements = Patch(facecolor='#DD0200',
                               alpha=0.5, edgecolor='#DD0200',
                               label='Unserved Energy')
# Variant used by the monthly plots (more opaque, underscore label).
custom_legend_elements_month = Patch(facecolor='#DD0200', alpha=0.7,
                                     edgecolor='#DD0200',
                                     label='Unserved_Energy')
class MPlot(PlotDataHelper):
"""total_generation MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The total_generation.py module contains methods that
display the total amount of generation over a given time period.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
Attributes:
MONTHS (dict) = dictionary of months.
"""
# Lookup from calendar month number (1-12) to its English name.
MONTHS = { 1 : "January",
           2 : "February",
           3 : "March",
           4 : "April",
           5 : "May",
           6 : "June",
           7 : "July",
           8 : "August",
           9 : "September",
           10 : "October",
           11 : "November",
           12 : "December"
         }
def __init__(self, argument_dict: dict):
    """Set up the total_generation plotter.

    Args:
        argument_dict (dict): Arguments forwarded from MarmotPlot; every
            key/value pair is attached to the instance as an attribute
            (see key_list in Marmot_plot_main for the expected keys).
    """
    # Adopt every entry of argument_dict as an instance attribute.
    for name, value in argument_dict.items():
        setattr(self, name, value)

    # Hand the shared plotting configuration to PlotDataHelper.
    super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
                     self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
                     self.xlabels, self.gen_names_dict,
                     Region_Mapping=self.Region_Mapping)

    self.logger = logging.getLogger('marmot_plot.' + __name__)

    # Figure geometry and axis-format options from the user config.
    self.x = mconfig.parser("figure_size", "xdimension")
    self.y = mconfig.parser("figure_size", "ydimension")
    self.y_axes_decimalpt = mconfig.parser("axes_options", "y_axes_decimalpt")
    self.curtailment_prop = mconfig.parser("plot_data", "curtailment_property")
def total_gen(self, start_date_range: str = None,
              end_date_range: str = None, **_):
    """Creates a stacked bar plot of total generation by technology type.

    A separate bar is created for each scenario; total load, total demand
    and unserved energy are overlaid on each bar as horizontal markers.

    Args:
        start_date_range (str, optional): Defines a start date at which to represent data from.
            Defaults to None.
        end_date_range (str, optional): Defines a end date at which to represent data to.
            Defaults to None.

    Returns:
        dict: Dictionary containing the created plot and its data table,
        keyed by zone; MissingInputData/MissingZoneData sentinels when
        required data is unavailable.
    """
    # Create Dictionary to hold Datframes for each scenario
    outputs = {}
    # Aggregation level of the load/unserved-energy properties.
    if self.AGG_BY == 'zone':
        agg = 'zone'
    else:
        agg = 'region'
    # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
    # required True/False, property name and scenarios required, scenarios must be a list.
    properties = [(True,"generator_Generation",self.Scenarios),
                  (False,f"generator_{self.curtailment_prop}",self.Scenarios),
                  (False,"generator_Pump_Load",self.Scenarios),
                  (True,f"{agg}_Load",self.Scenarios),
                  (False,f"{agg}_Unserved_Energy",self.Scenarios)]
    # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
    # with all required properties, returns a 1 if required data is missing
    check_input_data = self.get_formatted_data(properties)
    if 1 in check_input_data:
        outputs = MissingInputData()
        return outputs
    for zone_input in self.Zones:
        # Will hold retrieved data for each scenario
        gen_chunks = []
        load_chunk = []
        pumped_load_chunk = []
        total_demand_chunk = []
        unserved_energy_chunk = []
        self.logger.info(f"Zone = {zone_input}")
        for scenario in self.Scenarios:
            self.logger.info(f"Scenario = {scenario}")
            Total_Gen_Stack = self['generator_Generation'].get(scenario)
            # Check if zone has generation, if not skips
            try:
                Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
            except KeyError:
                self.logger.warning(f"No installed capacity in: {zone_input}")
                continue
            Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
            # Calculates interval step to correct for MWh of generation
            interval_count = PlotDataHelper.get_sub_hour_interval_count(Total_Gen_Stack)
            curtailment_name = self.gen_names_dict.get('Curtailment','Curtailment')
            # Insert Curtailment into gen stack if it exists in database
            Stacked_Curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
            if not Stacked_Curt.empty:
                if zone_input in Stacked_Curt.index.get_level_values(self.AGG_BY).unique():
                    Stacked_Curt = Stacked_Curt.xs(zone_input,level=self.AGG_BY)
                    Stacked_Curt = self.df_process_gen_inputs(Stacked_Curt)
                    # If using Marmot's curtailment property
                    if self.curtailment_prop == 'Curtailment':
                        Stacked_Curt = self.assign_curtailment_techs(Stacked_Curt)
                    # Collapse per-technology curtailment into one column.
                    Stacked_Curt = Stacked_Curt.sum(axis=1)
                    # Insert curtailment as an extra technology column.
                    Total_Gen_Stack.insert(len(Total_Gen_Stack.columns),
                                           column=curtailment_name, value=Stacked_Curt)
                    # Drop technologies that are zero everywhere.
                    Total_Gen_Stack = Total_Gen_Stack.loc[:, (Total_Gen_Stack != 0).any(axis=0)]
            # Correct sub-hourly readings so the sum below yields MWh.
            Total_Gen_Stack = Total_Gen_Stack/interval_count

            if pd.notna(start_date_range):
                self.logger.info(f"Plotting specific date range: \
                {str(start_date_range)} to {str(end_date_range)}")
                Total_Gen_Stack = Total_Gen_Stack[start_date_range:end_date_range]
            # Total energy per technology over the (possibly sliced) period.
            Total_Gen_Stack = Total_Gen_Stack.sum(axis=0)
            Total_Gen_Stack.rename(scenario, inplace=True)

            Total_Load = self[f"{agg}_Load"].get(scenario)
            Total_Load = Total_Load.xs(zone_input,level=self.AGG_BY)
            Total_Load = Total_Load.groupby(["timestamp"]).sum()
            if pd.notna(start_date_range):
                Total_Load = Total_Load[start_date_range:end_date_range]
            Total_Load = Total_Load.rename(columns={0:scenario}).sum(axis=0)
            Total_Load = Total_Load/interval_count

            Unserved_Energy = self[f"{agg}_Unserved_Energy"][scenario]
            if Unserved_Energy.empty:
                # Property missing from the database: substitute a zero
                # series shaped like the load data.
                Unserved_Energy = self[f"{agg}_Load"][scenario].copy()
                Unserved_Energy.iloc[:,0] = 0
            Unserved_Energy = Unserved_Energy.xs(zone_input,level=self.AGG_BY)
            Unserved_Energy = Unserved_Energy.groupby(["timestamp"]).sum()
            if pd.notna(start_date_range):
                Unserved_Energy = Unserved_Energy[start_date_range:end_date_range]
            Unserved_Energy = Unserved_Energy.rename(columns={0:scenario}).sum(axis=0)
            Unserved_Energy = Unserved_Energy/interval_count
            # subtract unserved energy from load for graphing (not sure this is actually used)
            if (Unserved_Energy == 0).all() == False:
                Unserved_Energy = Total_Load - Unserved_Energy

            Pump_Load = self["generator_Pump_Load"][scenario]
            if Pump_Load.empty or not mconfig.parser("plot_data","include_total_pumped_load_line"):
                # No pump data (or the line is disabled): substitute a zero
                # series shaped like the generation data.
                Pump_Load = self['generator_Generation'][scenario].copy()
                Pump_Load.iloc[:,0] = 0
            Pump_Load = Pump_Load.xs(zone_input,level=self.AGG_BY)
            Pump_Load = Pump_Load.groupby(["timestamp"]).sum()
            if pd.notna(start_date_range):
                Pump_Load = Pump_Load[start_date_range:end_date_range]
            Pump_Load = Pump_Load.rename(columns={0:scenario}).sum(axis=0)
            Pump_Load = Pump_Load/interval_count
            # Demand excludes storage charging (pump load) when present.
            if (Pump_Load == 0).all() == False:
                Total_Demand = Total_Load - Pump_Load
            else:
                Total_Demand = Total_Load

            gen_chunks.append(Total_Gen_Stack)
            load_chunk.append(Total_Load)
            pumped_load_chunk.append(Pump_Load)
            total_demand_chunk.append(Total_Demand)
            unserved_energy_chunk.append(Unserved_Energy)

        # Every scenario was skipped: nothing to plot for this zone.
        if not gen_chunks:
            outputs[zone_input] = MissingZoneData()
            continue

        Total_Generation_Stack_Out = pd.concat(gen_chunks, axis=1, sort=False).fillna(0)
        Total_Load_Out = pd.concat(load_chunk, axis=0, sort=False)
        Pump_Load_Out = pd.concat(pumped_load_chunk, axis=0, sort=False)
        Total_Demand_Out = pd.concat(total_demand_chunk, axis=0, sort=False)
        Unserved_Energy_Out = pd.concat(unserved_energy_chunk, axis=0, sort=False)

        Total_Load_Out = Total_Load_Out.rename('Total Load (Demand + \n Storage Charging)')
        Total_Demand_Out = Total_Demand_Out.rename('Total Demand')
        Unserved_Energy_Out = Unserved_Energy_Out.rename('Unserved Energy')

        Total_Generation_Stack_Out = self.create_categorical_tech_index(Total_Generation_Stack_Out)
        Total_Generation_Stack_Out = Total_Generation_Stack_Out.T
        Total_Generation_Stack_Out = Total_Generation_Stack_Out.loc[:, (Total_Generation_Stack_Out != 0).any(axis=0)]

        # unit conversion return divisor and energy units
        unitconversion = PlotDataHelper.capacity_energy_unitconversion(max(Total_Generation_Stack_Out.sum(axis=1)))
        Total_Generation_Stack_Out = Total_Generation_Stack_Out/unitconversion['divisor']
        Total_Load_Out = Total_Load_Out.T/unitconversion['divisor']
        Pump_Load_Out = Pump_Load_Out.T/unitconversion['divisor']
        Total_Demand_Out = Total_Demand_Out.T/unitconversion['divisor']
        Unserved_Energy_Out = Unserved_Energy_Out.T/unitconversion['divisor']

        # Data table of values to return to main program
        Data_Table_Out = pd.concat([Total_Load_Out.T,
                                    Total_Demand_Out.T,
                                    Unserved_Energy_Out.T,
                                    Total_Generation_Stack_Out], axis=1, sort=False)
        Data_Table_Out = Data_Table_Out.add_suffix(f" ({unitconversion['units']}h)")

        Total_Generation_Stack_Out.index = Total_Generation_Stack_Out.index.str.replace('_',' ')

        fig1, ax = plt.subplots(figsize=(self.x,self.y))
        Total_Generation_Stack_Out.plot.bar(stacked=True, ax=ax,
                                            color=[self.PLEXOS_color_dict.get(x, '#333333')
                                                   for x in Total_Generation_Stack_Out.columns],
                                            edgecolor='black', linewidth='0.1')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel(f"Total Generation ({unitconversion['units']}h)",
                      color='black', rotation='vertical')
        # Thousands separators with the configured number of decimals.
        ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(
            lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))

        # Set x-tick labels
        if len(self.custom_xticklabels) > 1:
            tick_labels = self.custom_xticklabels
        else:
            tick_labels = Total_Generation_Stack_Out.index
        PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)

        ax.tick_params(axis='y', which='major', length=5, width=1)
        ax.tick_params(axis='x', which='major', length=5, width=1)
        if mconfig.parser("plot_title_as_region"):
            ax.set_title(zone_input)

        # Overlay load / demand / unserved-energy markers on each bar.
        # NOTE(review): this indexes ax.patches and the *_Out frames by
        # scenario position; if a scenario was skipped above (no installed
        # capacity) the positions go out of step — confirm that cannot
        # happen here. Likewise `lp1`/`lp2` below are only bound inside
        # this loop, so an empty Scenarios list (lp1) or a pump-load total
        # with no per-scenario pump load (lp2) would raise NameError.
        for n, scenario in enumerate(self.Scenarios):
            x = [ax.patches[n].get_x(), ax.patches[n].get_x() + ax.patches[n].get_width()]
            height1 = [float(Total_Load_Out[scenario].sum())]*2
            lp1 = plt.plot(x,height1, c='black', linewidth=3)
            if Pump_Load_Out[scenario] > 0:
                height2 = [float(Total_Demand_Out[scenario])]*2
                lp2 = plt.plot(x,height2, 'r--', c='black', linewidth=1.5)
            if Unserved_Energy_Out[scenario] > 0:
                height3 = [float(Unserved_Energy_Out[scenario])]*2
                plt.plot(x,height3, c='#DD0200', linewidth=1.5)
                ax.fill_between(x, height3, height1,
                                facecolor = '#DD0200',
                                alpha=0.5)

        handles, labels = ax.get_legend_handles_labels()

        # Combine all legends into one.
        if Pump_Load_Out.values.sum() > 0:
            handles.append(lp2[0])
            handles.append(lp1[0])
            labels += ['Demand','Demand + \n Storage Charging']
        else:
            handles.append(lp1[0])
            labels += ['Demand']

        if Unserved_Energy_Out.values.sum() > 0:
            handles.append(custom_legend_elements)
            labels += ['Unserved Energy']

        ax.legend(reversed(handles),reversed(labels), loc='lower left',
                  bbox_to_anchor=(1.05,0), facecolor='inherit', frameon=True)

        outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
    return outputs
def total_gen_diff(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates a stacked bar plot of total generation by technology type, relative to a base scenario.
Barplots show the change in total generation relative to a base scenario.
The default is to compare against the first scenario provided in the inputs list.
Each scenario is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which to represent data to.
Defaults to None.
Returns:
dict: Dictionary containing the created plot and its data table.
"""
# Create Dictionary to hold Datframes for each scenario
outputs = {}
# List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
# required True/False, property name and scenarios required, scenarios must be a list.
properties = [(True, "generator_Generation", self.Scenarios),
(False, f"generator_{self.curtailment_prop}", self.Scenarios)]
# Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
# with all required properties, returns a 1 if required data is missing
check_input_data = self.get_formatted_data(properties)
if 1 in check_input_data:
outputs = MissingInputData()
return outputs
for zone_input in self.Zones:
self.logger.info(f"Zone = {zone_input}")
gen_chunks =[]
for scenario in self.Scenarios:
self.logger.info(f"Scenario = {scenario}")
Total_Gen_Stack = self['generator_Generation'].get(scenario)
#Check if zone has generation, if not skips and breaks out of Multi_Scenario loop
try:
Total_Gen_Stack = Total_Gen_Stack.xs(zone_input,level=self.AGG_BY)
except KeyError:
self.logger.warning(f"No installed capacity in : {zone_input}")
break
Total_Gen_Stack = self.df_process_gen_inputs(Total_Gen_Stack)
# Calculates interval step to correct for MWh of generation
interval_count = PlotDataHelper.get_sub_hour_interval_count(Total_Gen_Stack)
# Insert Curtailment into gen stack if it exists in database
Stacked_Curt = self[f"generator_{self.curtailment_prop}"].get(scenario)
if not Stacked_Curt.empty:
curtailment_name = self.gen_names_dict.get('Curtailment','Curtailment')
if zone_input in Stacked_Curt.index.get_level_values(self.AGG_BY).unique():
Stacked_Curt = Stacked_Curt.xs(zone_input,level=self.AGG_BY)
Stacked_Curt = self.df_process_gen_inputs(Stacked_Curt)
# If using Marmot's curtailment property
if self.curtailment_prop == 'Curtailment':
Stacked_Curt = self.assign_curtailment_techs(Stacked_Curt)
Stacked_Curt = Stacked_Curt.sum(axis=1)
Total_Gen_Stack.insert(len(Total_Gen_Stack.columns),
| |
The category will be determined
automatically in the former case.
Depending on the assertion, the order of the items may matter.
Parameters
----------
solver : Solver
The solver that this assertion operates in.
item1 :
The first item of interest.
item2 :
The second item of interest.
category :
The label of the category that links the items.
"""
self._dict__['solver'] = solver
self.__dict__['pos1'], self.__dict__['cat1'] = \
solver.item_to_pos(item1)
self.__dict__['pos2'], self.__dict__['cat2'] = \
solver.item_to_pos(item2)
self.__dict__['cat'] = solver.categories.index(category)
self.__dict__['key'] = self.default_key if key is None else key
self.__dict__['_cat_slice'] = solver.cat_slice(self.cat)
self._depth = 0
def __repr__(self):
    """Return a computer-readable-ish string representation."""
    arg_list = ', '.join(self._rlabels())
    return f'{type(self).__name__}(solver, {arg_list})'
def __str__(self):
    """Return a human readable string representation.

    Falls back to :py:meth:`__repr__`; child classes are expected to
    override this with something friendlier.
    """
    return self.__repr__()
@property
def solver(self):
    """
    The :py:class:`Solver` that this assertion is part of.
    """
    # Values are read straight out of __dict__ — presumably the class
    # restricts attribute assignment (cf. the read-only properties below);
    # confirm against the class's __setattr__ definition.
    return self.__dict__['solver']

@property
def pos1(self):
    """
    The index of the first item this assertion links, as a position
    in :py:attr:`solver`\ 's :py:attr:`~Solver.matrix`.
    """
    return self.__dict__['pos1']

@property
def cat1(self):
    """
    The category index of the first item this assertion links, as an
    index into :py:attr:`solver`\ 's :py:attr:`~Solver.categories`.

    This may or may not be the same as :py:attr:`cat` and
    :py:attr:`cat2`.
    """
    return self.__dict__['cat1']

@property
def pos2(self):
    """
    The index of the second item this assertion links, as a position
    in :py:attr:`solver`\ 's :py:attr:`~Solver.matrix`.
    """
    return self.__dict__['pos2']

@property
def cat2(self):
    """
    The category index of the second item this assertion links, as
    an index into :py:attr:`solver`\ 's :py:attr:`~Solver.categories`.

    This may or may not be the same as :py:attr:`cat` and
    :py:attr:`cat1`.
    """
    return self.__dict__['cat2']

@property
def cat(self):
    """
    The index of the category through which the items in this
    assertion are linked.

    This may or may not be the same as either of :py:attr:`cat1` and
    :py:attr:`cat2`, but it can't be the same as both.
    """
    return self.__dict__['cat']

@property
def key(self):
    """
    A function that maps the items in :py:attr:`cat` to comparable
    values.

    This function must accept a :py:class:`Solver` and a matrix
    position as arguments. The return value is the type of key that
    :py:meth:`op` supports. The default implementation of this
    method is :py:meth:`default_key`, which just maps the position
    to the corresponding label.

    All currently implemented assertions have a built-in assumption
    that the result of this function will be a number, but this is
    not a requirement in the general case.
    """
    return self.__dict__['key']

@property
def satisfied(self):
    """
    Determines if this assertion has been satisfied based on the
    additional information provided to the solver.

    An assertion is satisfied when the number of links between each
    of the items of interest and the linking category is one.
    """
    # Sum of remaining candidate links from each item into the category:
    # exactly one candidate each means both items are fully resolved.
    s1 = self.solver.matrix[self.pos1, self._cat_slice].sum()
    s2 = self.solver.matrix[self.pos2, self._cat_slice].sum()
    return s1 == s2 == 1

@property
def _cat_slice(self):
    """
    A :py:class:`slice` object used to access the elements that
    correspond to :py:attr:`cat` in a matrix row.

    This is provided as a convenience to avoid calling
    :py:meth:`Solver._cat_slice` multiple times.
    """
    return self.__dict__['_cat_slice']
def verify(self):
    """
    Check the assertion for all available links from the items to
    the category, and remove any invalid possibilities.

    This method may end up being recursive if a link removal
    triggers a re-verification of this assertion. In that case, the
    current execution will be aborted in favor of the updated one.

    Returns the number of links removed.
    """
    self.solver._indent()
    # Record the recursion depth of this invocation so that nested
    # re-verifications (triggered by unlink callbacks) can be detected.
    # NOTE(review): copysign(self._depth, 1) == abs(self._depth), so a
    # depth of 0 stays 0 and a negative depth resets to 0; if the intent
    # was to increment on entry this should probably be
    # `self._depth += 1` — confirm against _check_depth and the
    # `depth == 1` resets below.
    self._depth += int(copysign(self._depth, 1))
    depth = self._depth
    self.solver._log('Verifying assertion {0:*:S}', self, start=True)
    self.solver._log('Current Depth = {0:P:P}', depth, cont=True)
    count = 0  # number of links removed by this verification
    # Candidate links from each item into the linking category, keyed by
    # the comparison key of the linked item.
    options1 = {self.key(self.solver, pos): pos
                for pos in self.solver.linked_set(self.pos1, self.cat)}
    options2 = {self.key(self.solver, pos): pos
                for pos in self.solver.linked_set(self.pos2, self.cat)}
    # Options of item 2 left unmatched by the first sweep get a second pass.
    to_check2 = options2.copy()
    self.solver._log('{0:N:C} options for {1:P:I}: {2:S:L}',
                     self.cat, self.pos1, options1, cont=True)
    self.solver._log('{0:N:C} options for {1:P:I}: {2:S:L}\n',
                     self.cat, self.pos2, options2, cont=True)
    self.solver._log('Checking options for {0:P:I}', self.pos1)
    self.solver._indent()
    for i1 in options1:
        i2 = self.is_valid(i1, options2.keys())
        p1 = options1[i1]
        if i2 is None:
            # No compatible option on the other side: the link is impossible.
            self.solver._log('No match for {0:P:I}: unlinking from {1:P:I}',
                             p1, self.pos1)
            count += self.solver.unlink(p1, self.pos1)
            # Abort if the unlink re-entered verify() at another depth.
            if not self._check_depth(depth):
                return count
        else:
            p2 = options2[i2]
            self.solver._log('{0:P:I} matches {1:P:I}', p1, p2)
            to_check2.pop(i2, None)
    self.solver._dedent()
    self.solver._log('Checking remaining options for {0:P:I}', self.pos2)
    self.solver._indent()
    for i2 in to_check2:
        i1 = self.is_valid(i2, options1.keys(), reverse=True)
        p2 = to_check2[i2]
        if i1 is None:
            self.solver._log('No match for {0:P:I}: unlinking from {1:P:I}',
                             p2, self.pos2)
            count += self.solver.unlink(p2, self.pos2)
            if not self._check_depth(depth):
                return count
        else:
            p1 = options1[i1]
            self.solver._log('{0:P:I} matches {1:P:I}', p2, p1)
    self.solver._dedent()
    # A satisfied assertion retires itself; the sign flip marks the depth
    # as "already removed" for the unwinding callers.
    if self._depth > 0 and self.satisfied:
        self.solver.remove_assertion(self)
        self._depth = -self._depth
    if depth == 1:
        self._depth = 0
    self.solver._dedent()
    return count
def op(self, key1, key2):
    """
    The comparison operation that this assertion represents.

    Applied to items of the linking category after :py:attr:`key` has
    mapped them to comparable values. The argument order matters in the
    general case.

    Parameters
    ----------
    key1 :
        The first (left) item key to compare.
    key2 :
        The second (right) item key to compare.

    Return
    ------
    bool :
        A flag determining if the comparison succeeded or not.
    """
    raise NotImplementedError('Please implement this method in a child class')
def is_valid(self, key, options, reverse=False):
    """
    Return the first option compatible with *key* under :py:meth:`op`.

    Performs a linear scan of `options`; children may override this to
    optimize the comparison.

    Parameters
    ----------
    key :
        The item key to check.
    options : set
        A set of options to compare against.
    reverse : bool
        If `True`, the comparison is ``op(opt, key)`` instead of
        the usual ``op(key, opt)``.

    Return
    ------
    opt :
        The first encountered option that makes `key` valid, or
        `None` if no option does.
    """
    for opt in options:
        matched = self.op(opt, key) if reverse else self.op(key, opt)
        if matched:
            return opt
    return None
def update(self, pos12, posC):
    """
    Called when a link that is between either :py:attr:`pos1` or
    :py:attr:`pos2` and `posC` in :py:attr:`cat` is severed.

    The default is to re-:py:meth:`verify` the assertion as long as
    `pos12` is indeed either :py:attr:`pos1` or :py:attr:`pos2` and
    `posC` is in :py:attr:`cat`.

    An assertion must remove itself if it is satisfied by an update.

    Returns the number of additional links removed by the triggered
    verification (0 when the update is skipped).
    """
    self.solver._indent()
    if pos12 not in (self.pos1, self.pos2):
        self.solver._log('Skipping update: {0:P:I} is neither {1:P:I} '
                         'nor {2:P:I}', pos12, self.pos1, self.pos2)
        # BUG FIX: was `self._dedent()`, but indentation is managed by the
        # solver — assertions have no _dedent (cf. the _indent call above).
        self.solver._dedent()
        return 0
    if posC // self.solver.n != self.cat:
        # BUG FIX: the template referenced {2:N:C} while only two
        # arguments are passed; the category is argument {1}.
        self.solver._log('Skipping update: {0:P:I} is not in {1:N:C}',
                         posC, self.cat)
        self.solver._dedent()
        return 0
    self.solver._log('Triggering Verification')
    count = self.verify()
    self.solver._dedent()
    if self.satisfied:
        self.solver.remove_assertion(self)
    return count
def default_key(self, solver, pos):
    """
    Map *pos* to the `solver`\ 's label at that position — the default
    comparison key.

    A replacement key function (passed in the constructor or overridden
    in a child class) must accept the same arguments: a
    :py:class:`Solver` object and a matrix position.
    """
    labels = solver.labels
    return labels[pos]
def _check_depth(self, depth):
    """
    Return `True` iff no nested :py:meth:`verify` call has superseded
    the invocation that entered at *depth*.

    When the depths disagree and *depth* is 1 (the outermost call), the
    stack is unwinding, so :py:attr:`_depth` is reset to 0 for the next
    run.
    """
    if depth == self._depth:
        return True
    if depth == 1:
        self._depth = 0
    return False
def _labels(self):
    """
    Human-readable labels for :py:attr:`pos1`, :py:attr:`pos2` and
    :py:attr:`cat`, for use with :py:meth:`__str__`.
    """
    solver = self.solver
    first = ':'.join(map(repr, solver.pos_to_item(self.pos1)))
    second = ':'.join(map(repr, solver.pos_to_item(self.pos2)))
    category = repr(solver.categories[self.cat])
    return first, second, category
def _rlabels(self):
    """
    repr-style labels for :py:attr:`pos1`, :py:attr:`pos2` and
    :py:attr:`cat`, for use with :py:meth:`__repr__`.
    """
    def as_item_tuple(pos):
        # Render a position as "(cat_label, item_label)".
        return '(' + ', '.join(map(repr, self.solver.pos_to_item(pos))) + ')'

    category = repr(self.solver.categories[self.cat])
    return as_item_tuple(self.pos1), as_item_tuple(self.pos2), category
class AsymmetricAssertionMixin:
"""
A mixin class for assertions that care about the direction of the
difference between keys.
"""
def value(self, key1, key2):
"""
Return the signed difference ``key2 - key1``.
| |
# -*- coding: utf-8 -*-.
"""
pygsheets.cell
~~~~~~~~~~~~~~
This module contains cell model
"""
# import warnings
from .custom_types import *
from .exceptions import (IncorrectCellLabel, CellNotFound, InvalidArgumentValue)
from .utils import format_addr
class Cell(object):
"""
An instance of this class represents a single cell. A cell can be simple or complex. A complex cell will update
all information on each value access (more bandwidth).
in a :class:`worksheet <Worksheet>`.
:param pos: position (address) of the cell
:param val: value of the cell
:param worksheet: worksheet this cell belongs to
:param cell_data: Data about the cell in json, corresponding to cellData of sheets api
"""
def __init__(self, pos, val='', worksheet=None, cell_data=None):
    """Create a cell.

    :param pos: cell address, as a (row, col) tuple or an 'A1'-style label
    :param val: initial (formatted) value of the cell
    :param worksheet: worksheet this cell belongs to (None = unlinked cell)
    :param cell_data: cellData json from the sheets api to populate from
    """
    self._worksheet = worksheet
    if type(pos) is str:
        pos = format_addr(pos, 'tuple')
    self._row, self._col = pos
    self._label = format_addr(pos, 'label')
    self._value = val             # formatted value
    self._unformated_value = val  # unformatted value
    self._formula = ''
    self._note = ''
    # A cell is "linked" when it can push its updates to a worksheet.
    self._linked = worksheet is not None
    self._color = (1.0, 1.0, 1.0, 1.0)
    # Simple cells only track their value; format, notes etc. are not
    # fetched on each update.
    self._simplecell = True
    self.format = (FormatType.CUSTOM, '')
    """tuple specifying data format (format type, pattern) or just format"""
    self.text_format = {}    # the text format as json
    self.text_rotation = {}  # the text rotation as json
    self.horizondal_alignment = None
    self.vertical_alignment = None
    self.borders = {}
    """border properties as json, see gsheets api docs"""
    self.parse_value = True
    """if set false, value will be shown as it is set"""
    if cell_data:
        self.set_json(cell_data)
@property
def row(self):
    """Row number of the cell."""
    return self._row

@row.setter
def row(self, row):
    if not self._linked:
        self._row = row
        self._label = format_addr((self._row, self._col), 'label')
        return
    # Linked cells delegate to the worksheet and adopt the fetched state.
    refreshed = self._worksheet.cell((row, self.col))
    self.__dict__.update(refreshed.__dict__)

@property
def col(self):
    """Column number of the cell."""
    return self._col

@col.setter
def col(self, col):
    if not self._linked:
        self._col = col
        self._label = format_addr((self._row, self._col), 'label')
        return
    refreshed = self._worksheet.cell((self._row, col))
    self.__dict__.update(refreshed.__dict__)

@property
def label(self):
    """Cell Label - Eg A1"""
    return self._label

@label.setter
def label(self, label):
    if not self._linked:
        self._label = label
        self._row, self._col = format_addr(label, 'tuple')
        return
    refreshed = self._worksheet.cell(label)
    self.__dict__.update(refreshed.__dict__)
@property
def value(self):
    """get/set formatted value of the cell"""
    return self._value

@value.setter
def value(self, value):
    # Set locally first so the attribute is current even for unlinked cells.
    self._value = value
    if self._linked:
        self._worksheet.update_cell(self.label, value, self.parse_value)
        if not self._simplecell:  # refresh unformatted value and formula too
            self.fetch()
@property
def value_unformatted(self):
    """Get the unformatted value of the cell (refreshed by fetch()/set_json())."""
    return self._unformated_value
@property
def formula(self):
    """get/set formula if any of the cell"""
    if self._simplecell:
        self.fetch()
    return self._formula

@formula.setter
def formula(self, formula):
    # The API only recognizes formulas starting with '='.
    if not formula.startswith('='):
        formula = "=" + formula
    tmp = self.parse_value
    self.parse_value = True  # formulas are only interpreted when the value is parsed
    self.value = formula
    self._formula = formula
    self.parse_value = tmp  # restore the caller's parse setting
    self.fetch()  # re-read so value/unformatted value reflect the computed result
@property
def note(self):
    """get/set note on the cell"""
    if self._simplecell:
        self.fetch()
    return self._note

@note.setter
def note(self, note):
    if self._simplecell:
        # pull full cell data first so update() doesn't clobber other attributes
        self.fetch()
    self._note = note
    self.update()
@property
def color(self):
    """get/set background color of the cell as tuple (red, green, blue, alpha)"""
    if self._simplecell:
        self.fetch()
    return self._color

@color.setter
def color(self, value):
    """Set the background color.

    Accepts a full or partial (r, g, b, alpha) tuple — now also a list —
    or a single number which is taken as the red channel. Missing
    components default to 1.0; components must be in the range 0-1.
    """
    if self._simplecell:
        self.fetch()
    if isinstance(value, (tuple, list)):
        # Previously a list raised a confusing TypeError during validation and
        # >4 components produced an invalid 5-tuple; normalize to exactly 4.
        value = list(value)[:4]
        if len(value) < 4:
            value = value + [1.0] * (4 - len(value))
    else:
        value = (value, 1.0, 1.0, 1.0)
    for c in value:
        if c < 0 or c > 1:
            raise InvalidArgumentValue("Color should be in range 0-1")
    self._color = tuple(value)
    self.update()
@property
def simple(self):
    """If this cell is simple. Simple cells will only fetch the value; otherwise
    all the cell attributes (format, note, color, ...) are fetched."""
    return self._simplecell

@simple.setter
def simple(self, value):
    self._simplecell = value
def set_text_format(self, attribute, value):
    """
    set the text format
    :param attribute: one of the following "foregroundColor" "fontFamily", "fontSize", "bold", "italic",
                      "strikethrough", "underline"
    :param value: corresponding value for the attribute
    :return: :class: Cell
    """
    if self._simplecell:
        self.fetch()
    valid_attributes = ("foregroundColor", "fontFamily", "fontSize", "bold",
                        "italic", "strikethrough", "underline")
    if attribute not in valid_attributes:
        raise InvalidArgumentValue("not a valid argument, please see the docs")
    self.text_format[attribute] = value
    self.update()
    return self
def set_text_rotation(self, attribute, value):
    """
    set the text rotation
    :param attribute: "angle" or "vertical"
    :param value: corresponding value for the attribute: an int in [-90, 90]
        for 'angle', a boolean for 'vertical'
    :return: :class:`cell <Cell>`
    """
    if self._simplecell:
        self.fetch()
    if attribute not in ["angle", "vertical"]:
        raise InvalidArgumentValue("not a valid argument, please see the docs")
    if attribute == "angle":
        if type(value) != int:
            raise InvalidArgumentValue("angle value must be of type int")
        if value not in range(-90, 91):
            raise InvalidArgumentValue("angle value range must be between -90 and 90")
    if attribute == "vertical":
        if type(value) != bool:
            raise InvalidArgumentValue("vertical value must be of type bool")
    # Only one rotation mode can be active at a time, so the dict is replaced.
    self.text_rotation = {attribute: value}
    self.update()
    return self
def set_text_alignment(self, alignment, direction=None):
    """
    set text alignment in either direction
    :param alignment: either LEFT, CENTER, RIGHT (horizontal), TOP, MIDDLE,
        BOTTOM (vertical), or None to clear
    :param direction: 'vertical' or 'horizondal' (the misspelling is part of
        this API); mandatory only if alignment is None
    """
    if alignment in ["LEFT", "CENTER", "RIGHT"]:
        self.horizondal_alignment = alignment
    elif alignment in ["TOP", "MIDDLE", "BOTTOM"]:
        self.vertical_alignment = alignment
    elif alignment is None:
        # Clearing requires knowing which axis to clear.
        if direction == "vertical":
            self.vertical_alignment = None
        elif direction == "horizondal":
            self.horizondal_alignment = None
        else:
            raise InvalidArgumentValue("direction")
    else:
        raise InvalidArgumentValue("alignment")
    self.update()
    return self
def unlink(self):
    """Unlink the cell from its worksheet. Unlinked cells won't be updated when
    properties change; call link() again or update() to sync pending changes."""
    self._linked = False
    return self
def link(self, worksheet=None, update=False):
    """
    link cell with a worksheet. Linked cells are updated instantaneously when
    properties are changed. Most helpful when using a python terminal.
    :param worksheet: the worksheet to link to
    :param update: whether the cell should be synced after linking
    :return: :class:`cell <Cell>`
    """
    if worksheet is None and self._worksheet is None:
        raise InvalidArgumentValue("Worksheet not set for uplink")
    if worksheet:
        self._worksheet = worksheet
    self._linked = True
    if update:
        self.update()
    return self
def neighbour(self, position):
    """
    get a neighbouring cell of this cell
    :param position: either a (row_offset, col_offset) tuple or a string built
        from 'right', 'left', 'top', 'bottom' (combinations allowed, e.g. 'topright')
    :return: :class:`neighbouring cell <Cell>`; returns False when this cell is
        unlinked (NOTE(review): inconsistent with raising elsewhere — preserved)
    """
    if not self._linked:
        return False
    addr = [self.row, self.col]
    if type(position) == tuple:
        addr = (addr[0] + position[0], addr[1] + position[1])
    elif type(position) == str:
        if "right" in position:
            addr[1] += 1
        if "left" in position:
            addr[1] -= 1
        if "top" in position:
            addr[0] -= 1
        if "bottom" in position:
            addr[0] += 1
    try:
        ncell = self._worksheet.cell(tuple(addr))
    except IncorrectCellLabel:
        raise CellNotFound
    return ncell
def fetch(self, keep_simple=False):
    """Update the cell from the sheet.

    :param keep_simple: if True, don't switch the cell out of simple mode
    :return: self, or False when the cell is unlinked
    """
    if not keep_simple: self._simplecell = False
    if self._linked:
        self._value = self._worksheet.cell(self._label).value
        # Fetch the full cellData (format, note, color, ...) for this one cell.
        result = self._worksheet.client.sh_get_ssheet(self._worksheet.spreadsheet.id, fields='sheets/data/rowData',
                                                      include_data=True,
                                                      ranges=self._worksheet._get_range(self.label))
        try:
            result = result['sheets'][0]['data'][0]['rowData'][0]['values'][0]
        except (KeyError, IndexError):
            # An empty cell comes back with no rowData at all.
            result = dict()
        self.set_json(result)
        return self
    else:
        return False
def update(self, force=False):
    """
    update the sheet cell value with the attributes set
    :param force: update the cell even if its unlinked
    :return: False when the cell is unlinked and force is not set
    """
    if not self._linked and not force:
        return False
    self._simplecell = False
    # repeatCell ranges are 0-based and half-open, hence the -1 on the start indices.
    request = {
        "repeatCell": {
            "range": {
                "sheetId": self._worksheet.id,
                "startRowIndex": self.row - 1,
                "endRowIndex": self.row,
                "startColumnIndex": self.col - 1,
                "endColumnIndex": self.col
            },
            "cell": self.get_json(),
            "fields": "userEnteredFormat, note"
        }
    }
    self._worksheet.client.sh_batch_update(self._worksheet.spreadsheet.id, request, None, False)
    # The repeatCell request above only carries format and note; push the value too.
    self.value = self._value  # @TODO combine to above?
def get_json(self):
    """Return this cell as a cellData JSON fragment as per the google api."""
    # self.format may be a (type, pattern) pair or a bare format type.
    try:
        nformat, pattern = self.format
    except TypeError:
        nformat, pattern = self.format, ""
    number_format = {
        "type": getattr(nformat, 'value', nformat),
        "pattern": pattern
    }
    background_color = {
        "red": self._color[0],
        "green": self._color[1],
        "blue": self._color[2],
        "alpha": self._color[3],
    }
    user_format = {
        "numberFormat": number_format,
        "backgroundColor": background_color,
        "textFormat": self.text_format,
        "borders": self.borders,
        "textRotation": self.text_rotation,
        "horizontalAlignment": self.horizondal_alignment,
        "verticalAlignment": self.vertical_alignment
    }
    return {"userEnteredFormat": user_format, "note": self._note}
def set_json(self, cell_data):
    """
    set the cell data from json obj of the cell as per google api
    :param cell_data: json data about cell (cellData)
    """
    self._value = cell_data.get('formattedValue', '')
    try:
        # effectiveValue holds exactly one of numberValue/stringValue/boolValue/...
        self._unformated_value = list(cell_data['effectiveValue'].values())[0]
    except KeyError:
        self._unformated_value = ''
    self._formula = cell_data.get('userEnteredValue', {}).get('formulaValue', '')
    self._note = cell_data.get('note', '')
    nformat = cell_data.get('userEnteredFormat', {}).get('numberFormat', {})
    self.format = (nformat.get('type', FormatType.CUSTOM), nformat.get('pattern', ''))
    color = cell_data.get('userEnteredFormat', {}) \
        .get('backgroundColor', {'red': 1.0, 'green': 1.0, 'blue': 1.0, 'alpha': 1.0})
    # NOTE(review): a fully missing color defaults to white, but individually
    # missing components default to 0 — presumably matching the api's behavior
    # of omitting zero-valued fields; confirm before changing.
    self._color = (color.get('red', 0), color.get('green', 0), color.get('blue', 0), color.get('alpha', 0))
    self.text_format = cell_data.get('userEnteredFormat', {}).get('textFormat', {})
    self.text_rotation = cell_data.get('userEnteredFormat', {}).get('textRotation', {})
    self.borders = cell_data.get('userEnteredFormat', {}).get('borders', {})
def __eq__(self, other):
if self._worksheet is | |
(symbol, count))
elif period == 30:
count: int = 0
i: int = 0
loop_len = len(stock_day) - 1
while i < loop_len:
timestamp: pd.Timestamp = pd.to_datetime(stock_day[i])
time_point = datetime.datetime(year=timestamp.year, month=timestamp.month, day=timestamp.day,
hour=timestamp.hour, minute=timestamp.minute, second=timestamp.second)
open_time = datetime.datetime.combine(datetime.date(year=timestamp.year, month=timestamp.month, day=timestamp.day),
DataContext.marketopentime)
count_period = (time_point - open_time).seconds // (15 * 60)
if i == 0 and (count_period % 2) == 0:
i += 1
continue
next_idx = i + 1
open_value = stock_open[i]
close_value = stock_close[next_idx]
if stock_high[i] >= stock_high[next_idx]:
high_value = stock_high[i]
else:
high_value = stock_high[next_idx]
if stock_low[i] <= stock_low[next_idx]:
low_value = stock_low[i]
else:
low_value = stock_low[next_idx]
volume_value = stock_volume[i] + stock_volume[next_idx]
i += 2
if DataContext.iscountryUS():
csr.execute(statement_start + exchange + "_tbl_30 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_30" + update_stat,
(str(symbol), str(stock_day[next_idx]), "{:.4f}".format(open_value), "{:.4f}".format(close_value),
"{:.4f}".format(high_value), "{:.4f}".format(low_value), str(volume_value)))
elif DataContext.iscountryChina():
csr.execute(statement_start + exchange + "_tbl_30 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_30" + update_stat,
(str(symbol), str(stock_day[next_idx]), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
transientdf.loc[len(transientdf)] = [str(symbol), open_value, close_value, high_value, low_value,
str(volume_value), stock_day[next_idx], nan]
count += 1
if transientdf is not None:
transientdf.set_index('time', inplace=True)
conn.commit()
logger.debug("%s - rows are %d for period 30 mins" % (symbol, count))
elif period == 60 and DataContext.iscountryChina():
count: int = 0
transientdf.sort_index(inplace=True)
stock_day = transientdf.index.tolist()
stock_open = transientdf['open']
stock_close = transientdf['close']
stock_high = transientdf['high']
stock_low = transientdf['low']
stock_volume = list(map(int, transientdf['volume'].tolist()))
i: int = 0
loop_len = len(stock_day) - 1
while i < loop_len:
next_idx = i + 1
open_value = stock_open[i]
close_value = stock_close[next_idx]
if stock_high[i] >= stock_high[next_idx]:
high_value = stock_high[i]
else:
high_value = stock_high[next_idx]
if stock_low[i] <= stock_low[next_idx]:
low_value = stock_low[i]
else:
low_value = stock_low[next_idx]
volume_value = stock_volume[i] + stock_volume[next_idx]
i += 2
csr.execute(statement_start + exchange + "_tbl_60 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_60" + update_stat,
(str(symbol), str(stock_day[next_idx]), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
count += 1
conn.commit()
logger.debug("%s - rows are %d for period 60 mins" % (symbol, count))
elif period == 240 and DataContext.iscountryChina():
count: int = 0
if type_func == 2:
for code, row in stock_df.iterrows():
if row[header_o] is not None and \
row[header_c] is not None and \
row[header_h] is not None and \
row[header_l] is not None and \
row[header_v] is not None and \
len(code.split('.')) > 1:
csr.execute(statement_start + exchange + "_tbl_240 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_240" + update_stat,
(str(code.split('.')[0]), str(row[header_d]), str(row[header_o]), str(row[header_c]),
str(row[header_h]), str(row[header_l]), str(row[header_v])))
count += 1
else:
transientdf.sort_index(inplace=True)
stock_day = transientdf.index.tolist()
stock_open = transientdf['open']
stock_close = transientdf['close']
stock_high = transientdf['high']
stock_low = transientdf['low']
stock_volume = list(map(int, transientdf['volume'].tolist()))
i: int = 0
stock_day_len = len(stock_day)
# the transientdf contains data of 30 mins
abandoned_15_mins_count = stock_day_len % 8
if abandoned_15_mins_count != 0:
i += abandoned_15_mins_count
while i < stock_day_len:
last_index = i + 7
if last_index > stock_day_len - 1:
break
timestamp: pd.Timestamp = pd.to_datetime(stock_day[i])
time_point = datetime.datetime(year=timestamp.year, month=timestamp.month, day=timestamp.day)
open_value = stock_open[i]
close_value = stock_close[last_index]
high_value = float(stock_high[i])
low_value = float(stock_low[i])
volume_value = stock_volume[i]
i += 1
while i < last_index + 1:
if float(stock_high[i]) > high_value:
high_value = float(stock_high[i])
if float(stock_low[i]) < low_value:
low_value = float(stock_low[i])
volume_value += stock_volume[i]
i += 1
csr.execute(statement_start + exchange + "_tbl_240 (gid,crt_time,open,close,high,low,volume) " +
"values (%s,%s,%s,%s,%s,%s,%s) on conflict on constraint time_key_" + exchange + "_240" + update_stat,
(str(symbol), str(time_point), str(open_value), str(close_value),
str(high_value), str(low_value), str(volume_value)))
count += 1
if count != 0:
conn.commit()
logger.debug("%s - rows are %d for period 240 mins" % (exchange, count))
# Module-level progress bar shared by insertdata(); its total/description are set per run.
pbar = tqdm()
def insertdata(exchange: str, group: str, symbols: list, retried, datasource: DataSource, period: str = '15',
               type_func=1, context: DataContext=None, adjust: str = "qfq"):
    """Fetch candles for *symbols* from *datasource* and store them via inserttab().

    :param exchange: exchange table name prefix
    :param group: symbol prefix / market suffix used to build provider symbols
    :param retried: dict collecting symbols to retry on transient provider errors
    :param period: base candle period in minutes (as a string, provider-side)
    :param type_func: 1 = fetch from provider, 5 = use snapshot in *context*,
        2 = obsolete EastMoney batch path
    :param context: DataContext carrying 15-min snapshots (type_func=5 only)
    :param adjust: price adjustment mode; currently unused by the active paths
    """
    exchange_group = ",".join([exchange, group])

    def update_database(exchange_in: str, symbol_s: str, dataset, source):
        # Store the base 15-min candles, then derive 30/60/240-min aggregates
        # through the shared transient dataframe.
        # NOTE(review): `columns` is presumably a module-level list — confirm.
        inserttab(exchange_in, symbol_s, dataset, source)
        tmp_df = pd.DataFrame(columns=columns)
        inserttab(exchange_in, symbol_s, dataset, source, period=30, transientdf=tmp_df)
        inserttab(exchange_in, symbol_s, dataset, source, period=60, transientdf=tmp_df)
        inserttab(exchange_in, symbol_s, dataset, source, period=240, transientdf=tmp_df)

    if DataContext.iscountryChina():
        evt = threading.Event()
        if type_func == 1:
            global pbar
            pbar.total = len(symbols)
            pbar.set_description_str(f'{exchange} Processing')
            for symbol_i in symbols:
                if datasource == DataSource.AK_SHARE:
                    symbol_internal = group + str(symbol_i)
                    # stock_zh_df_tmp = ak.stock_zh_a_minute(symbol=symbol_internal, period=period, adjust=adjust)
                    # FIXME
                    try:
                        time.sleep(1)  # throttle akshare requests
                        stock_zh_df_tmp = ak.stock_zh_a_minute(symbol=symbol_internal, period=period, datalengh="16")
                    except:  # NOTE(review): bare except — narrower handling would be safer; preserved
                        stock_zh_df_tmp = pd.DataFrame(columns=columns)
                        failed_to_get_data_symbols.append((group, str(symbol_i)))
                        logger.error("it is failed to get stock data for {}".format(symbol_internal))
                elif datasource == DataSource.EAST_MONEY:
                    symbol_internal = ".".join([str(symbol_i), group])
                    stock_zh_df_tmp = c.cmc(symbol_internal, "OPEN,HIGH,LOW,CLOSE,VOLUME,TIME",
                                            (datetime.datetime.today() - datetime.timedelta(days=3)).strftime(
                                                "%Y-%m-%d"),
                                            datetime.datetime.today().strftime("%Y-%m-%d"),
                                            "AdjustFlag=1,RowIndex=2,Period=15,IsHistory=1,Ispandas=1")
                    if isinstance(stock_zh_df_tmp, c.EmQuantData) and stock_zh_df_tmp.ErrorCode != 0:
                        logger.error(
                            "it is failed to get stock data for {} {} and error code is {} error message is {}".
                            format(symbol_i, exchange_group, stock_zh_df_tmp.ErrorCode, stock_zh_df_tmp.ErrorMsg))
                        # Transient service errors are queued for a retry pass.
                        if stock_zh_df_tmp.ErrorMsg.find('service error') != -1 or \
                                stock_zh_df_tmp.ErrorCode == 10002011 or \
                                stock_zh_df_tmp.ErrorCode == 10002010 or \
                                stock_zh_df_tmp.ErrorCode == 10002004:
                            append_value(retried, exchange_group, symbol_i)
                elif datasource == DataSource.EFINANCE:
                    freq = 15
                    stock_zh_df_tmp: pd.DataFrame = ef.stock.get_quote_history(
                        str(symbol_i), klt=freq,
                        beg=(datetime.datetime.today() - datetime.timedelta(days=0)).strftime("%Y%m%d"),
                        end=(datetime.datetime.today() - datetime.timedelta(days=0)).strftime("%Y%m%d"),
                        fqt=0)
                if isinstance(stock_zh_df_tmp, pd.DataFrame) and len(stock_zh_df_tmp) > 0:
                    update_database(exchange, symbol_i, stock_zh_df_tmp, datasource)
                if datasource == DataSource.AK_SHARE:
                    # watchdog for fetching stock data
                    global queue_history_data
                    queue_history_data.put(((group, str(symbol_i)), evt))
                    evt.wait()
                pbar.update(1)
                pbar.set_description_str(f'{exchange} Processing => {symbol_i}')
        elif type_func == 5:
            # Build today's candles from the in-memory 15-min snapshot (last 16 bars).
            for symbol_i in symbols:
                dataframe_context: pd.DataFrame = context.data15mins[exchange].get(symbol_i)
                df_today = pd.DataFrame(columns=['gid', 'open', 'close', 'high', 'low', 'volume'])
                index_list = dataframe_context.index.tolist()
                total_len = 16
                i = - total_len
                while i < 0:
                    row = dataframe_context.iloc[i]
                    df_today.loc[index_list[i]] = [str(symbol_i), row['open'], row['close'],
                                                   row['high'], row['low'], row['volume']]
                    i += 1
                update_database(exchange, symbol_i, df_today, datasource)
        # EM has been obsoleted.
        elif type_func == 2:
            symbol_internals = []
            for symbol_i in symbols:
                symbol_internals.append(".".join([str(symbol_i), group]))
            if group == "SZ":
                market = "CNSESZ"
            else:
                market = "CNSESH"
            stock_zh_df_tmp = c.csd(symbol_internals, "OPEN,HIGH,LOW,CLOSE,VOLUME,TIME",
                                    (datetime.datetime.today() - datetime.timedelta(days=3)).strftime("%Y-%m-%d"),
                                    datetime.datetime.today().strftime("%Y-%m-%d"),
                                    "AdjustFlag=1,RowIndex=1,Period=1,Ispandas=1,Market=%s" % market)
            if isinstance(stock_zh_df_tmp, c.EmQuantData) and stock_zh_df_tmp.ErrorCode != 0:
                logger.error(
                    "it is failed to get stock data for {} and error code is {} error message is {}".
                    format(exchange_group, stock_zh_df_tmp.ErrorCode, stock_zh_df_tmp.ErrorMsg))
            elif isinstance(stock_zh_df_tmp, pd.DataFrame):
                inserttab(exchange, "", stock_zh_df_tmp, datasource, period=240, type_func=type_func)
    elif DataContext.iscountryUS():
        for symbol_i in symbols:
            stock_us_df_tmp = yf.download(tickers=symbol_i, auto_adjust=True, period="10d", interval="15m")
            if isinstance(stock_us_df_tmp, pd.DataFrame):
                inserttab(exchange, symbol_i, stock_us_df_tmp, datasource)
                inserttab(exchange, symbol_i, stock_us_df_tmp, datasource, period=30)
def insertdata_continue(exchange: str, group: str, symbols: list, c_point: str, retried, datasource: DataSource,
                        period: str = '15', type_func=1, adjust: str = "qfq"):
    """Resume insertdata() from just after the checkpoint symbol *c_point*.

    Previously `(pd.Series(symbols) == c_point).argmax() + 1` was used; when
    the checkpoint was not found argmax() returned 0 and the first symbol was
    silently skipped. An unknown checkpoint now restarts from the beginning.

    :param c_point: last successfully processed symbol
    :param adjust: accepted for signature parity with insertdata(); unused here
    """
    try:
        pos = symbols.index(c_point) + 1  # resume right after the checkpoint
    except ValueError:
        pos = 0  # checkpoint not in the list: process everything
    insertdata(exchange, group, symbols[pos:], retried, datasource, period, type_func)
def insertdata_with_snapshot(exchange:str, group:str, symbols:list, context: DataContext, datasource: DataSource):
    """Insert today's candles from the 15-min snapshot held in *context* (type_func=5 path)."""
    insertdata(exchange, group, symbols, {}, datasource, type_func=5, context=context)
def loaddatalocked(indicator: str, exchange: str, symbols: list, operation: int, type_func=1,
                   datasource=DataSource.AK_SHARE, c_point='', retried=None, period=15,
                   context: DataContext=None):
    """Dispatch one data-maintenance operation for the group named by *indicator*.

    :param operation: 1 create tables, 2 insert, 3 insert from checkpoint,
        4 drop tables, 5 insert from snapshot
    :param retried: dict collecting symbols to retry; a fresh dict is created
        per call (the previous `retried={}` default was a shared mutable
        default, accumulating entries across calls)

    NOTE(review): the insertdata* calls pass (group, exchange) although those
    functions declare their parameters as (exchange, group); preserved as-is —
    confirm which order the table naming actually relies on.
    """
    if retried is None:
        retried = {}
    group = stock_group[indicator]
    if operation == 1:
        createtable(symbols, group, period)
    elif operation == 2:
        insertdata(group, exchange, symbols, retried, datasource, "%d" % period, type_func)
    elif operation == 3:
        insertdata_continue(group, exchange, symbols, c_point, retried, datasource, "%d" % period, type_func)
    elif operation == 4:
        droptable(symbols, group)
    elif operation == 5:
        insertdata_with_snapshot(group, exchange, symbols, context, datasource)
def normalizeticker(symbols: pd.Series) -> pd.Series:
    """Left-pad tickers with zeros to the canonical 6-character form.

    :param symbols: series of tickers (ints or strings)
    :returns: a new Series (fresh integer index) of 6-character strings

    Fixes a defect in the previous implementation: tickers that were already
    6 or more characters long were dropped from the result entirely (only
    padded tickers were collected). Every input ticker is now preserved —
    shorter ones zero-padded, the rest unchanged.
    """
    return pd.Series([str(ticker).zfill(6) for ticker in symbols])
def selectgroup(indicator: str):
    """Load the symbol list for the market segment named by *indicator*.

    :returns: (group, tickers) where group is the market suffix ('SZ'/'SH' for
        China, 'O'/'N'/'A' for US) and tickers is a list of symbol strings.

    NOTE(review): for an indicator matching none of the known segments,
    `group`/`returndata` are never assigned and the final return raises
    NameError — presumably callers only pass known indicators; confirm.
    """
    symbol_path = symbol_paths[stock_group[indicator]]
    if pathlib.Path(symbol_path).is_file():
        symbolsfromcsv = pd.read_csv(symbol_path)
    else:
        # Missing symbol file is fatal for the whole load.
        logger.error("The file {} doesn't exist".format(symbol_path))
        exit()
    if DataContext.iscountryChina():
        if indicator in {"中小企业板", "创业板", "主板"}:
            header = "公司代码"
            group = 'SZ'
            # Numeric codes read from csv lose leading zeros; re-pad to 6 digits.
            if indicator in {"中小企业板", "主板"}:
                returndata = normalizeticker(symbolsfromcsv[header]).tolist()
            else:
                returndata = symbolsfromcsv[header].tolist()
        if indicator in {"科创板", "主板A股"}:
            header = 'SECURITY_CODE_A'
            group = 'SH'
            returndata = symbolsfromcsv[header].tolist()
    elif DataContext.iscountryUS():
        if indicator == "NASDAQ":
            group = 'O'
        elif indicator == "NYSE":
            group = 'N'
        elif indicator == "AMEX":
            group = 'A'
        # Keep only 'SYMBOL.SUFFIX' entries, stripped of their suffix.
        symbol_group = symbolsfromcsv['SECURITY_CODE_A'].tolist()
        returndata = [symbol_us.split('.')[0] for symbol_us in symbol_group if len(symbol_us.split('.')) > 1]
    return group, returndata
def loaddata(indicators, operation: int, c_point='', datasource: DataSource = DataSource.AK_SHARE, period=15, type_func=1, isloginAlready=False):
retriedStocks = {}
if datasource == DataSource.EAST_MONEY and not isloginAlready:
login_em()
try:
loaddatainternal(indicators, operation, type_func, c_point, retriedStocks, datasource, period)
if datasource == DataSource.EAST_MONEY and type_func == 1:
reloaddata(retriedStocks)
finally:
| |
from collections import OrderedDict
from functools import partial
from inspect import Parameter, Signature, signature
import opcode
import sys
from types import CodeType, FunctionType, SimpleNamespace
from typing import (
cast, Any, Callable, Dict, Generic, Iterable, List, NamedTuple, NoReturn,
Optional, Tuple, TYPE_CHECKING, Type, TypeVar, Union
)
__version__ = '2.0rc1'

if TYPE_CHECKING:
    from typing_extensions import Literal, Protocol

    # Valid values for a chain function's exception preference.
    ExceptionPref = Literal['unwanted', 'accepted', 'required']

    class StateProtocol(Protocol):
        """Typing protocol for the state objects of chains.
        """
        exception: Optional[Exception]
else:
    # At runtime the typing-only constructs degrade to cheap placeholders so
    # typing_extensions isn't a hard dependency.
    ExceptionPref = str
    StateProtocol = None

State = TypeVar('State', bound=StateProtocol)
ChainFunction = Callable
ChainFunctionRef = Union[ChainFunction, str]
Func = TypeVar('Func', bound=Callable)
T = TypeVar('T')
class _LoopState:
    """Mutable cursor shared between nested invocations of the loop in
    StateChain.run(): the index of the next function to run and the function
    run most recently (used for `return_after`)."""
    __slots__ = ('i', 'prev_func')

    def __init__(self) -> None:
        self.i: int = 0
        self.prev_func: Optional[ChainFunction] = None
class _ChainLink(NamedTuple):
    # One entry of StateChain._functions: the function itself, when it runs
    # relative to exceptions, and its pre-computed signature (None when the
    # function simply takes the whole state object).
    function: ChainFunction
    exception_pref: ExceptionPref
    signature: Optional[Signature]
class _FunctionMapValue:
    """Value type of StateChain._functions_map: a function and its position."""

    __slots__ = ('function', 'position')

    def __init__(self, function: ChainFunction, position: Optional[int]):
        self.function = function
        self.position = position

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}({self.function!r}, {self.position!r})"
class Object(SimpleNamespace):
    """The default type of a chain's :obj:`state` object.
    A namespace that supports both attribute-style and dict-style lookups and
    assignments. This is similar to a JavaScript object, hence the name.
    """
    def __init__(
        self,
        *d: Union[Dict[str, Any], Iterable[Tuple[str, Any]]],
        # Fixed annotation: each keyword value may be anything, not Dict[str, Any].
        **kw: Any,
    ) -> None:
        # dict.update accepts mappings, iterables of pairs, and keywords alike.
        self.__dict__.update(*d, **kw)

    def __contains__(self, key: str) -> bool:
        return key in self.__dict__

    def __getitem__(self, key: str) -> Any:
        return self.__dict__[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.__dict__[key] = value
class StateChain(Generic[State]):
    """Model an algorithm as a list of functions operating on a shared state.
    :param type state_type: the type of the state object
    :param functions: a sequence of functions in the order they are to be run
    :param bool raise_immediately: default value for the `raise_immediately`
                                   argument of the :meth:`run` method
    :param str exception_preference: default value for the `exception` argument
                                     of the :meth:`add` method
    """
    # '__dict__' is listed alongside the slots so instances can also carry
    # arbitrary extra attributes (copy() duplicates that dict).
    __slots__ = (
        'state_type', 'raise_immediately', 'exception_preference', '_functions',
        '_functions_map', '__dict__',
    )
def __init__(
    self,
    state_type: Type[State] = cast(Type[State], Object),
    functions: Iterable[ChainFunction] = (),
    raise_immediately: bool = False,
    exception_preference: ExceptionPref = 'unwanted',
):
    self.state_type = state_type
    # exception_preference must be set before add(), which reads it as the
    # default preference for each function.
    self.exception_preference = exception_preference
    self._functions: Tuple[_ChainLink, ...] = ()
    self._functions_map: Dict[str, _FunctionMapValue] = {}
    self.add(*functions)
    self.raise_immediately = raise_immediately
@property
def functions(self) -> Tuple[ChainFunction, ...]:
    """The chain's functions, in execution order (read-only)."""
    return tuple(func for func, _pref, _sig in self._functions)

@functions.setter
def functions(self, new_list: Any) -> NoReturn:
    raise AttributeError(
        "You should use the `modify()` method to customize a state chain. "
        "See https://state-chain-py.readthedocs.io/ for details."
    )
def copy(self) -> 'StateChain':
    """Returns a copy of this chain.
    """
    r = StateChain(self.state_type, raise_immediately=self.raise_immediately)
    # exception_preference is a slot, so it is NOT carried over by the
    # __dict__ copy below; previously copies silently reset it to 'unwanted'.
    r.exception_preference = self.exception_preference
    r._functions = self._functions
    r._functions_map = self._functions_map.copy()
    r.__dict__ = self.__dict__.copy()
    return r
def run(
    self,
    state: Optional[State] = None,
    raise_immediately: Optional[bool] = None,
    return_after: Optional[str] = None,
) -> State:
    """Run through the functions in the :attr:`functions` list.
    :param State state: the initial state object for this run of the chain
        (`self.state_type()` is called to create an object if none is provided)
    :param bool raise_immediately: if not ``None``, will override any
        default for ``raise_immediately`` that was set in the constructor
    :param str return_after: if not ``None``, return after calling the function
        with this name
    :raises: :exc:`FunctionNotFound`, if there is no function named
        ``return_after``
    :returns: the ``state`` object
    For each function in the :attr:`functions` list, we look at the
    function's exception preference and at the current value of
    ``state.exception``. If ``state.exception`` is ``None``, then we skip
    any function whose exception preference is :obj:`'required'`, and if
    ``state.exception`` is *not* ``None`` then we only call functions whose
    exception preference is not :obj:`'unwanted'`. The upshot is that any
    function that raises an exception will cause us to fast-forward to the
    next exception-handling function in the list.
    Here are some further notes on exception handling:
    - If a function's exception preference is :attr:`'accepted'`, then that
      function will be called whether or not there is an exception being
      handled.
    - You should set ``state.exception = None`` when an exception has been
      handled. The chain run will resume normally from where it is (it
      won't backtrack to run the functions that were skipped during
      exception handling).
    - If an exception is raised by a function handling another exception,
      then ``state.exception`` is set to the new one and we look for the
      next exception handler.
    - If ``state.exception`` is not ``None`` after all functions have been
      run, then we re-raise it.
    - If ``raise_immediately`` evaluates to ``True`` (looking first at the
      ``raise_immediately`` argument and falling back to the chain's
      ``raise_immediately`` attribute), then we re-raise any exception
      immediately instead of fast-forwarding to the next exception handler.
    - When an exception occurs, the chain functions that handle it are
      called from inside the ``except:`` block, so you can access
      ``sys.exc_info`` (which contains the traceback).
    """
    if state is None:
        state = self.state_type()
    if raise_immediately is None:
        raise_immediately = self.raise_immediately
    # Resolve the name to the function object up front (raises FunctionNotFound).
    return_after = self[return_after] if return_after else None
    if not hasattr(state, 'exception'):
        state.exception = None
    functions = self._functions
    j = len(functions)
    # loop_state is shared by all recursive invocations of loop() below, so a
    # nested (exception-handling) pass resumes where the outer one stopped.
    loop_state = _LoopState()

    def loop(
        # The first two arguments are for mypy's benefit.
        state: State,
        return_after: Optional[ChainFunction],
        in_except: bool
    ) -> None:
        while loop_state.i < j:
            function, exception_pref, sig = functions[loop_state.i]
            loop_state.i += 1
            if return_after:
                # Stop once the function *after* return_after comes up.
                if loop_state.prev_func is return_after:
                    break
                loop_state.prev_func = function
            if in_except:
                # Skip when function doesn't want exception but we have it.
                if exception_pref == 'unwanted':
                    continue
            else:
                # Skip when function wants exception but we don't have it.
                if exception_pref == 'required':
                    continue
            try:
                if sig:
                    call(function, state, sig)
                else:
                    function(state)
                if in_except and state.exception is None:
                    # exception is cleared, return to normal flow
                    return
            except Exception as e:
                if raise_immediately:
                    raise
                state.exception = e
                # Handle the new exception from inside this except block so
                # handlers can access sys.exc_info().
                loop(state, return_after, True)
                if in_except:
                    # an exception occurred while we were handling another
                    # exception, but now it's been cleared, so we return to
                    # the normal flow
                    return
        if state.exception:
            raise state.exception  # exception hasn't been handled, reraise

    loop(state, return_after, state.exception is not None)
    return state
def __contains__(self, func_ref: ChainFunctionRef) -> bool:
    """Whether *func_ref* (a name or the function object itself) is in this chain."""
    if isinstance(func_ref, str):
        return func_ref in self._functions_map
    entry = self._functions_map.get(func_ref.__name__)
    return entry is not None and entry.function is func_ref
def __getitem__(self, name: str) -> ChainFunction:
    """Return the function in the :attr:`functions` list named ``name``, or raise
    :exc:`FunctionNotFound`.
    >>> def foo(): pass
    >>> algo = StateChain(functions=[foo])
    >>> algo['foo'] is foo
    True
    >>> algo['bar']
    Traceback (most recent call last):
      ...
    state_chain.FunctionNotFound: The function 'bar' isn't in this state chain.
    """
    v = self._functions_map.get(name)
    if v is None:
        raise FunctionNotFound(name)
    return v.function
def get_names(self) -> List[str]:
    """Returns a list of the names of the functions in the :attr:`functions` list.
    """
    return [link.function.__name__ for link in self._functions]
def add(
self,
*funcs: ChainFunction,
position: Optional[int] = None,
exception: Optional[ExceptionPref] = None,
alias: Optional[str] = None,
) -> Optional[ChainFunction]:
"""Insert functions into the chain.
:param funcs: the function(s) to add to the chain
:param int position: where to insert the function in the chain
:param str exception: determines when this function will be run or skipped.
The valid values are: 'unwanted', 'accepted', and 'required'.
:param str alias: one or more alternative names for the function being added,
separated by whitespace
:raises: :exc:`TypeError` if an element of the ``funcs`` list isn't a callable,
or if the ``alias`` argument is provided when adding multiple functions
>>> from types import SimpleNamespace
>>> algo = StateChain(SimpleNamespace)
>>> @algo.add
... def foo(): pass
>>> @algo.add(position=0)
... def bar(): pass
>>> algo.get_names()
['bar', 'foo']
>>> @algo.add(position=algo.after('bar'), exception='accepted')
... def baz(): pass
>>> algo.get_names()
['bar', 'baz', 'foo']
>>> @algo.add(position=algo.before('bar'), exception='required')
... def bal(): pass
>>> algo.get_names()
['bal', 'bar', 'baz', 'foo']
Of course, the method doesn't have to be used as a decorator:
>>> def bah(): pass
>>> algo.add(bah, position=0)
<function bah at ...>
>>> algo.get_names()
['bah', 'bal', 'bar', 'baz', 'foo']
"""
if not funcs:
return partial(self.add, position=position, exception=exception, alias=alias)
for f in funcs:
if not callable(f):
raise TypeError("Not a function: " + repr(f))
func_tuples = tuple(self._make_chain_link(f, exception) for f in funcs)
if position is None:
position = len(self._functions)
self._functions += func_tuples
else:
after = self._functions[position:]
self._functions = (
self._functions[:position] + func_tuples + after
)
offset = len(funcs)
for link in after:
func_name = link.function.__name__
v = self._functions_map[func_name]
| |
# MIT License
#
# Copyright (c) 2020 <NAME> <tony[dot]wu(at)nyu[dot]edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import sqlite3
from pathlib import Path
from urllib.parse import quote, unquote
from ..sql.utils import offset_fetch
from ..utils import pathsafe
from .exporters import MappingCSVExporter, MappingLineExporter
from .utils import build_where_clause, with_db
log = logging.getLogger('exporter.url')
def build_ctes(select):
    """Fill the URL/date expansion placeholders of *select* and build the column map.

    :param select: SQL template containing ``%(urlexpansions)s`` and
        ``%(dateexpansions)s`` placeholders (normally the ``CTE`` constant).
    :return: ``(sql, column_maps)`` where ``sql`` is the interpolated template and
        ``column_maps`` maps user-facing attribute names (``'target:netloc'``,
        ``'published:year'``, ``'tag'``, ...) to SQL column expressions.
    """
    url_tables = ('feed', 'source', 'target')
    url_attrs = ['scheme', 'netloc', 'path', 'query']
    # One urlsplit() projection per URL component.
    urlexpansions = ', '.join(
        f"""urlsplit(url.url, '{attr}') AS "{attr}" """ for attr in url_attrs
    )
    # 'url' is exposed in the column map but needs no urlsplit() projection.
    url_attrs.append('url')
    date_attrs = ['year', 'month', 'day', 'hour', 'minute', 'second']
    date_substr = [(1, 4), (6, 2), (9, 2), (12, 2), (15, 2), (18, 2)]
    # Slice the ISO-8601 `item.published` text into integer date parts.
    dateexpansions = ', '.join(
        f"""CAST(substr(item.published, {start}, {length}) AS INTEGER) AS "{attr}" """
        for attr, (start, length) in zip(date_attrs, date_substr)
    )
    # 'date' (the raw published string) is selectable but not re-derived.
    date_attrs.append('date')
    column_maps = {
        f'{table}:{attr}': f'{table}.{attr}'
        for table in url_tables
        for attr in url_attrs
    }
    column_maps.update(
        (f'published:{attr}', f'items.{attr}') for attr in date_attrs
    )
    column_maps['tag'] = 'hyperlink.element'
    column_maps['source:title'] = 'items.title'
    column_maps['feed:title'] = 'feed_info.title'
    column_maps['feed:isdead'] = 'feed_info.dead'
    return select % {'urlexpansions': urlexpansions, 'dateexpansions': dateexpansions}, column_maps
CTE = """
WITH urlsplits AS (
SELECT
url.id AS id,
url.url AS url,
%(urlexpansions)s
FROM
url
),
items AS (
SELECT
item.url AS url,
item.source AS source,
item.title AS title,
item.author AS author,
item.published AS date,
%(dateexpansions)s
FROM
item
)
"""
SELECT = """
SELECT
%(columns)s
FROM
hyperlink
JOIN urlsplits AS source ON source.id == hyperlink.source_id
JOIN urlsplits AS target ON target.id == hyperlink.target_id
JOIN items ON hyperlink.source_id == items.url
JOIN feed AS feed_info ON items.source == feed_info.url_id
JOIN urlsplits AS feed ON items.source == feed.id
"""
@with_db
def export(
    conn: sqlite3.Connection, wd: Path, output: Path, fmt='urls.txt',
    include=None, exclude=None, key=None, format='lines', escape=None,
):
    """Select URL records from the database and export them.

    :param conn: open database connection (supplied by @with_db).
    :param wd: working directory (supplied by @with_db).
    :param output: output path or %-style naming template.
    :param fmt: default output file name/template.
    :param include: inclusion filters, passed to build_where_clause().
    :param exclude: exclusion filters, passed to build_where_clause().
    :param key: comma-separated attribute name(s) to export; defaults to
        'target:url' for lines and all attributes for csv.
    :param format: 'lines' or 'csv'.
    :param escape: filename escaping mode; one of the escape_func keys or None.
    :raises KeyError: if *format* is not 'lines' or 'csv' (same error type the
        original dict lookup raised).
    """
    cte, column_maps = build_ctes(CTE)
    if format == 'lines':
        keys = (key,) if key else ('target:url',)
    else:
        # BUG FIX: was `set(key.split(','))`. A set cannot be indexed and its
        # iteration order is nondeterministic (string hash randomization), so
        # the CSV column order varied between runs. dict.fromkeys dedupes while
        # preserving the user's order.
        keys = list(dict.fromkeys(key.split(','))) if key else list(column_maps.keys())
    where, values, _ = build_where_clause(include, exclude)
    columns = ', '.join(f'{v} AS "{k}"' for k, v in column_maps.items())
    column_keys = ', '.join(f'"{k}"' for k in keys)
    select = SELECT % {'columns': columns}
    select = f'{cte}{select} WHERE %(offset)s AND {where} GROUP BY {column_keys}'
    log.debug(select)
    escape_func = {
        'percent': quote,
        'replace': pathsafe,
        'unquote': unquote,
        'unquote-replace': lambda s: pathsafe(unquote(s)),
    }.get(escape)
    # BUG FIX: the original built a `formatters` dict whose 'lines' entry
    # evaluated `keys[0]` eagerly; with format='csv' and key=... `keys` was a
    # set, so `keys[0]` raised TypeError before the exporter was even chosen.
    # Select the exporter lazily instead.
    if format == 'lines':
        cls, args = MappingLineExporter, (keys[0], output, fmt, escape_func)
    elif format == 'csv':
        cls, args = MappingCSVExporter, (keys, output, fmt, escape_func)
    else:
        raise KeyError(format)
    log.info('Reading database...')
    with cls(*args) as exporter:
        # Stream rows in batches to keep memory bounded.
        for row in offset_fetch(conn, select, 'hyperlink',
                                values=values, log=log, size=200000):
            exporter.write(row)
    log.info('Done.')
help_text = """
Select and export URLs in various formats.
Synopsis
--------
export ~urls~ -i <input> [**-o** ~name or template~] [[**+f|-f** ~filter~]...]
[**key=**~attrs...~] [**format=**~lines|csv~]
[**escape=**~none|percent|replace|unquote|unquote-replace~]
Description
-----------
This exporter lets you select and export URLs found in scraped data.
By default, it exports all URLs found in scraped HTML markups. You can export
other data such as dates or domain names by specifying the **key=** additional
option (see below).
If there already exist some exported data, running this exporter again will
append to existing data.
Options
-------
This exporter supports the following parameters, specified as `key=value` pairs,
in addition to the exporter options:
~format=lines|csv~
Output format. Default is ~lines~.
~key=[...]~
What data to export, specified as one or more comma-separated attribute
names (see **~Available attributes~**).
If format is ~lines~, you may only choose one attribute,
e.g. `key=target:netloc`.
If format is ~csv~, you may export multiple attributes,
e.g. `key=source:netloc,tag`
Default is ~target:url~ for ~lines~, and ~all attributes~ for ~csv~.
~escape=none|percent|replace|unquote|unquote-replace~
Escape filenames. This is useful if you want URL path names to be part
of the filename.
~percent~ will use URL percent-encodings. For example, space characters
will be encoded as `%20`.
~replace~ will aggressively replace all punctuations and characters not
in the ISO-8859-1 encoding with `-`.
~unquote~ is the inverse of ~percent~: replace all percent-encoded
characters with the original ones.
~unquote-replace~ first unquotes the filename, then uses ~replace~ on it
Default (when unspecified) is ~none~.
Example
-------
`python -m export urls -i input -o out.txt format=csv` \\
`key=source:netloc,tag`
Output Template
---------------
Instead of specifying a regular path for the **-o/--output** option, you may
also specify a naming template. This allows you to sort URLs to different files
based on some varying attributes such as domain name.
Templates are specified as Python %-format strings with named placeholders e.g.
`%(target:netloc)s.txt`. You can also use any modifier that Python supports,
such as `%(target:url).10s.txt`.
Examples
--------
`export urls ... -o "%(source:netloc)s.txt"`
Sorts URLs into files named with the domain name of the feed on which
the URL is found.
`export urls ... -o "%(target:netloc).6s-%(published:year)s.txt"`
Name files using domain name the hyperlink is pointing to and the date
info of scraped feed articles.
Slashes are also supported:
`export urls -i data -o "%(feed:title)s/%(tag)s/%(target:netloc)s.csv"`
will results in a folder hierarchy that may look like:
`./data/out/`
`xkcd.com/`
`img/`
`imgs.xkcd.com.csv`
`xkcd.com.csv`
`...`
`a/`
`itunes.apple.com.csv`
`www.barnesandnoble.com.csv`
`...`
See **~Available attributes~** for a list of available placeholders.
Filters
-------
You can filter URLs based on URL components such as domain names and protocols,
as well as feed attributes such as names and dates published.
Filters are specified using the **--include/--exclude** options
(shorthands **+f/-f**).
Each filter is a space-separated tuple ~attr predicate value~, where ~attr~ is
one of the **~available attributes~** to test against, ~value~ is the value for
testing, and ~predicate~ is one of the following:
~is~
Equivalent to `==`
~gt~, ~ge~, ~lt~, ~le~
For integer types (such as date values).
Equivalent to `>`, `>=`, `<`, `<=`.
~startswith~, ~endswith~, ~contains~
For string types.
Equivalent to `str.startswith`, `str.endswith`, and the `in` operator.
~under~
Only for domain name attributes (~...:netloc~)
True if the tested value is or is a subdomain of ~value~, and
False otherwise.
**+f/-f** can be specified multiple times to enable multiple filters. Only URLs
that pass all filters are exported.
Examples
--------
`export urls ... +f source:netloc is xkcd.com`
Select URLs that are found in markups from xkcd.com
`export urls ... -f target:netloc is google.com`
Select URLs that are NOT pointing to google.com
`export urls ... +f target:path startswith /wp-content`
Select URLs whose path components begin with "/wp-content".
Note that URL paths always include the leading / and are %-encoded
e.g. if you want to specify a path with spaces,
you will need to use `%20`.
`export urls ... \\`
` +f tag is img \\`
` +f source:netloc is staff.tumblr.com \\`
` +f target:netloc under media.tumblr.com \\`
` +f published:year lt 2017`
Select image URLs pointing to domains under "media.tumblr.com"
from posts from "staff.tumblr.com" that are before 2017.
Available attributes
--------------------
Each attribute is in the form of either ~object~ or ~object:key~.
Objects
-------
**URL objects:** ~source~, ~target~, ~feed~
~source~ is the URL to the webpage containing the HTML markup. It is
returned by Feedly.
~target~ is the URL found in HTML tags in the ~source~'s markup.
~feed~ is the feed URL.
(That is, ~source~, which is scraped from ~feed~, contains a
hyperlink that points to ~target~).
**Keys**: For each kind of URL object, the following keys are available:
~url~: The complete URL.
~scheme~: The protocol of the URL e.g. `http` and `https`.
~netloc~: The domain name of the URL e.g. `example.org`.
~path~: The path of the URL, with the beginning slash.
~query~: Query string of the URL without `?`, if any
(These are the attribute names from the
~`urllib.parse.urlsplit`~ namedtuple.)
Example
-------
For a ~feed~ | |
self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(preferences)
nname = self.G.add_node(pathname, xpos=xpos, ypos=ypos, nodetype='base', **additional)
added.append(nname)
ypos += 50
if ypos == 450:
ypos = initialYpos
xpos += 50
if filename == baseImageFileName:
self.start = nname
self.end = None
except Exception as ex:
logging.getLogger('maskgen').warn('Failed to add media file {}'.format(filename))
self.notify(added, 'add')
def addImage(self, pathname, cgi=False, prnu=False, **kwargs):
maxx = 50
max_node = None
for node_id in self.G.get_nodes():
node = self.G.get_node(node_id)
if 'xpos' in node and int(node['xpos']) > maxx:
maxx = int(node['xpos'])
max_node = node
maxy = max_node['ypos'] + 50 if max_node is not None else 50
additional = self.getAddTool(pathname).getAdditionalMetaData(pathname)
additional.update(kwargs)
nname = self.G.add_node(pathname, nodetype='base',
cgi='yes' if cgi else 'no',
xpos=maxx,
ypos=maxy,
prnu='yes' if prnu else 'no',
**additional)
self.start = nname
self.end = None
self.notify([self.start], 'add')
return nname
def getEdgesBySemanticGroup(self):
"""
:return: association of semantics groups to edge id tuples (start,end)
@rtype: dict of list of tuple
"""
result = {}
for edgeid in self.getGraph().get_edges():
for grp in self.getSemanticGroups(edgeid[0], edgeid[1]):
if grp not in result:
result[grp] = [edgeid]
else:
result[grp].append(edgeid)
return result
    def add_to_edge(self, **items):
        """Merge *items* into the currently selected (start, end) edge's
        attributes and notify listeners of the edge update."""
        self.G.update_edge(self.start, self.end, **items)
        self.notify((self.start, self.end), 'update_edge')
    def update_node(self, node_properties):
        """Replace properties of the current start node.

        NOTE(review): the cached/derived start-image file is deleted first,
        presumably so it is regenerated from the new properties on next
        access — confirm against deleteImage()'s contract.
        """
        deleteImage(self.getStartImageFile())
        self.G.update_node(self.start, **node_properties)
def update_edge(self, mod):
"""
:param mod:
:return:
@type mod: Modification
"""
op = self.gopLoader.getOperationWithGroups(mod.operationName,fake=True)
mod_old = self.getModificationForEdge(self.start, self.end)
trigger_update = False
for k,v in mod.arguments.iteritems():
if (k not in mod_old.arguments or mod_old.arguments[k] != v) and \
k in op.getTriggerUpdateArguments():
trigger_update = True
for k in mod_old.arguments:
if k not in mod.arguments and \
k in op.getTriggerUpdateArguments():
trigger_update = True
self.G.update_edge(self.start, self.end,
op=mod.operationName,
description=mod.additionalInfo,
arguments={k: v for k, v in mod.arguments.iteritems() if k != 'inputmaskname'},
recordMaskInComposite=mod.recordMaskInComposite,
semanticGroups=mod.semanticGroups,
editable='no' if (
mod.software is not None and mod.software.internal) or mod.operationName == 'Donor' else 'yes',
softwareName=('' if mod.software is None else mod.software.name),
softwareVersion=('' if mod.software is None else mod.software.version),
inputmaskname=mod.inputMaskName)
self._save_group(mod.operationName)
if trigger_update:
self.reproduceMask(force=False)
else:
self.notify((self.start, self.end), 'update_edge')
    def compare(self, destination, arguments={}):
        """ Compare the 'start' image node to the image node with the name in the 'destination' parameter.
        Return both images, the mask and the analysis results (a dictionary)

        NOTE(review): `arguments={}` is a mutable default; safe only as long as
        the link tool never mutates it — confirm before relying on it.
        """
        return self.getLinkTool(self.start, destination).compare(self.start, destination, self, arguments=arguments)
def getMetaDiff(self):
""" Return the EXIF differences between nodes referenced by 'start' and 'end'
Return the Frame meta-data differences between nodes referenced by 'start' and 'end'
"""
e = self.G.get_edge(self.start, self.end)
if e is None:
return None
videodiff = VideoMetaDiff(e['metadatadiff']) if getValue(e,'metadatadiff',None) is not None else None
imagediff = MetaDiff(e['exifdiff']) if 'exifdiff' in e and len(e['exifdiff']) > 0 else None
return imagediff if imagediff is not None else videodiff
def getDonorAndBaseNodeTuples(self):
"""
Return a tuple (edge, base node, list of nodes that for the path from edge to base)
for each valid donor path through the graph
"""
donorEdges = []
for edge_id in self.G.get_edges():
edge = self.G.get_edge(edge_id[0], edge_id[1])
if graph_rules.eligible_for_donor(edge):
donorEdges.append(edge_id)
results = []
for edge in donorEdges:
baseSet = self._findBaseNodesAndPaths(edge[0], excludeDonor=True)
for base in baseSet:
if (edge, base) not in results:
results.append((edge, base[0], base[1]))
if len(baseSet) == 0:
results.append((edge, None, list()))
for result in results:
result[2].reverse()
return results
def getTerminalAndBaseNodeTuples(self):
"""
Return a tuple (lead node, base node) for each valid (non-donor) path through the graph
"""
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
return [(node, self._findBaseNodes(node)) for node in terminalNodes]
    def getEdges(self, endNode,excludeDonor=True):
        """
        Collect all edges on paths ending in *endNode*, guarding against cycles.

        :param endNode: (identifier)
        :param excludeDonor: when True, Donor edges are not traversed
        :return: tuple (start, end, edge map) for all edges ending in endNode
        """
        return self._findEdgesWithCycleDetection(endNode, excludeDonor=excludeDonor, visitSet=list())
    def getNodeNames(self):
        """Return the identifiers of all nodes in the underlying graph."""
        return self.G.get_nodes()
    def getCurrentNode(self):
        """Return the node record for the currently selected start node."""
        return self.G.get_node(self.start)
def isEditableEdge(self, start, end):
e = self.G.get_edge(start, end)
return 'editable' not in e or e['editable'] == 'yes'
def findChild(self, parent, child):
for suc in self.G.successors(parent):
if suc == child or self.findChild(suc, child):
return True
return False
def compress(self, all=False,force=False):
if all:
return [self._compress(node) for node in self.G.get_nodes()]
else:
return self._compress(self.start, force=force)
    def _compress(self, start, force=False):
        """Compress the media file backing node *start* using the configured
        compressor for its media type; returns the new file name or None.

        Unless *force* is set, nodes with pending skipped edge comparisons and
        interior/isolated constraints below are left untouched.
        """
        # Default compressor rule per media type; only video has a built-in.
        defaults = {'compressor.video': 'maskgen.video_tools.x264',
                    'compressor.audio': None,
                    'compressor.image': None}
        node = self.G.get_node(start)
        ftype = self.getNodeFileType(start)
        # cannot finish the action since the edge analysis was skipped
        for skipped_edge in self.G.getDataItem('skipped_edges', []):
            if skipped_edge['start'] == start and not force:
                return
        # Only compress interior nodes (with both successors and predecessors)
        # unless forced — NOTE(review): the condition actually skips leaves and
        # roots; confirm that interior-only is the intent.
        if (len(self.G.successors(start)) == 0 or len(self.G.predecessors(start)) == 0) and not force:
            return
        props = {'remove_video': False,'force': False}
        #for pred in self.G.predecessors(start):
        #    edge = self.G.get_edge(pred, start)
        #    op = getOperationWithGroups(edge['op'], fake=True)
        #    if op.category == 'Audio':
        #        props['remove_video'] = True
        # Preference override falls back to the per-type default rule name.
        compressor = prefLoader.get_key('compressor.' + ftype,
                                        default_value=defaults['compressor.' + ftype])
        # Already compressed nodes are left alone.
        if 'compressed' in node:
            return
        func = getRule(compressor)
        newfile = None
        if func is not None:
            newfilename = func(os.path.join(self.get_dir(), node['file']), **props)
            if newfilename is not None:
                newfile = os.path.split(newfilename)[1]
                # Record both the new file and which compressor produced it.
                self.G.update_node(start,file=newfile,compressed=compressor)
        return newfile
def connect(self, destination, mod=Modification('Donor', '',category='Donor'), invert=False, sendNotifications=True,
skipDonorAnalysis=False):
""" Given a image node name, connect the new node to the end of the currently selected node.
Create the mask, inverting the mask if requested.
Send a notification to the register caller if requested.
Return an error message on failure, otherwise return None
"""
if self.start is None:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Node node selected",
Module=''), False
elif not self.G.has_node(destination):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Canvas out of state from model. Node Missing.",
Module=''), False
elif self.findChild(destination, self.start):
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to ancestor node",
Module=''), False
else:
for successor in self.G.successors(self.start):
if successor == destination:
return ValidationMessage(Severity.ERROR,
self.start,
self.end,
Message="Cannot connect to the same node twice",
Module=''), False
return self._connectNextImage(destination,
mod,
invert=invert,
sendNotifications=sendNotifications,
skipDonorAnalysis=skipDonorAnalysis)
def getPredecessorNode(self):
if self.end is None:
for pred in self.G.predecessors(self.start):
edge = self.G.get_edge(pred, self.start)
if edge['op'] != 'Donor':
return pred
return self.start
def getBaseNode(self, node):
for pred in self.G.predecessors(node):
edge = self.G.get_edge(pred, node)
if edge['op'] != 'Donor':
return self.getBaseNode(pred)
return node
def getCreatingOperation(self, destination):
"""
:return: operation for the manipulation that created this destination and the start node
@rtype: (str,Operation)
"""
predecessors = self.G.predecessors(destination)
for pred in predecessors:
pred_edge = self.G.get_edge(pred, destination)
edge_op = self.gopLoader.getOperationWithGroups(pred_edge['op'])
if edge_op is not None and pred_edge['op'] != 'Donor':
return pred, edge_op
    def getDonorAndBaseImage(self):
        """
        Get the donor image and associated baseImage for the selected node.

        Returns (donor mask image, base image) or (None, None) when the node
        is not a donor leaf or the donor is audio.
        """
        nodeName = self.start if self.end is None else self.end
        # verify the node is a leaf node
        endPointTuples = self.getDonorAndBaseNodeTuples()
        for x in endPointTuples:
            # x is (edge, base node, path); x[0][1] is the donor edge's end node.
            if nodeName == x[0][1]:
                baseImage, _ = self.G.get_image(x[1])
                donors = self.constructDonors()
                for donortuple in donors:
                    if donortuple.base == x[1]:
                        if donortuple.media_type == 'video':
                            # Collapse the video mask to one representative frame.
                            return video_tools.getSingleFrameFromMask(donortuple.mask_wrapper), baseImage
                        elif donortuple.media_type == 'audio':
                            # Audio donors have no visual representation.
                            return None, None
                        else:
                            return donortuple.mask_wrapper, baseImage
        return None, None
    def getTransformedMask(self):
        """
        Build composite masks for the currently selected (start, end) edge.

        :return: list of CompositeImage
        """
        composite_generator = mask_rules.prepareComposite((self.start, self.end),self.G, self.gopLoader, self.probeMaskMemory)
        # Empty-mask checking is skipped here; callers handle empty composites.
        return composite_generator.constructComposites(checkEmptyMask=False)
def executeFinalNodeRules(self):
terminalNodes = [node for node in self.G.get_nodes() if
len(self.G.successors(node)) == 0 and len(self.G.predecessors(node)) > 0]
for node in terminalNodes:
graph_rules.setFinalNodeProperties(self, node)
    def constructDonors(self):
        """
        Construct donor images
        Find all valid base node, leaf node tuples
        :return computed donors in the form of tuples
        (image node id donated to, base image node, ImageWrapper mask, filename)
        @rtype list of DonorImage
        """
        # Materialize any deferred edge comparisons before building donors.
        self._executeSkippedComparisons()
        for edge_id in self.G.get_edges():
            # When a node is selected, only consider edges terminating at it.
            if self.start is not None and self.start != edge_id[1]:
                continue
            # NOTE(review): returns on the FIRST qualifying edge — donors from
            # any further edges are never produced; confirm this is intended.
            composite_generator = mask_rules.prepareComposite(edge_id, self.G, self.gopLoader, self.probeMaskMemory)
            return composite_generator.constructDonors(saveImage=False)
        return []
def invertInputMask(self):
"""
Temporary: Add missing input masks
:return:
"""
if self.start is not None and self.end is not None:
start_im = self.startImage()
edge = self.G.get_edge(self.start, self.end)
if edge is not None:
maskname= getValue(edge,'inputmaskname')
if maskname is not None:
mask = openImageMaskFile(self.get_dir(),maskname)
if mask is not None:
expected_shape = start_im.image_array.shape[0:2]
if expected_shape != mask.shape:
mask = cv2.resize(mask,tuple(reversed(expected_shape)))
mask = ImageWrapper(mask)
mask = mask.invert()
mask.save(os.path.join(self.get_dir(),maskname))
    def fixInputMasks(self):
        """
        Temporary: Add missing input masks

        For every edge flagged as missing a donor input mask, synthesize one
        from the edge mask and the start/final images, save it next to the
        start image, and record its base name on the edge.
        :return:
        """
        for edge_id in self.G.get_edges():
            edge = self.G.get_edge(edge_id[0], edge_id[1])
            if graph_rules.missing_donor_inputmask(edge, self.G.dir):
                startimage, name = self.G.get_image(edge_id[0])
                finalimage, fname = self.G.get_image(edge_id[1])
                mask = self.G.get_edge_image(edge_id[0], edge_id[1], 'maskname')
                # NOTE(review): `name` presumably includes the directory, so the
                # mask is written beside the start image — confirm get_image().
                inputmaskname = os.path.splitext(name)[0]+ '_inputmask.png'
                ImageWrapper(composeCloneMask(mask, startimage, finalimage)).save(inputmaskname)
                # if 'arguments' not in edge:
                #    edge['arguments'] = {}
                # Only the base name is stored on the edge.
                edge['inputmaskname'] = os.path.split(inputmaskname)[1]
                # edge['arguments']['inputmaskname'] = os.path.split(inputmaskname)[1]
        self.G.setDataItem('autopastecloneinputmask', 'yes')
def renametobase(self):
"""
Rename the project to match the name of the base image
:return:
"""
for nodeid in self.G.get_nodes():
node = self.G.get_node(nodeid)
if 'nodetype' in node and node['nodetype'] == 'base':
pos = node['file'].find('.')
| |
the method is called asynchronously,
returns the request thread.
"""
all_params = ['notification_id', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coaching_notification" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'notification_id' is set
if ('notification_id' not in params) or (params['notification_id'] is None):
raise ValueError("Missing the required parameter `notification_id` when calling `get_coaching_notification`")
resource_path = '/api/v2/coaching/notifications/{notificationId}'.replace('{format}', 'json')
path_params = {}
if 'notification_id' in params:
path_params['notificationId'] = params['notification_id']
query_params = {}
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CoachingNotification',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def get_coaching_notifications(self, **kwargs):
"""
Retrieve the list of your notifications.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_coaching_notifications(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page_number: Page number
:param int page_size: Page size
:param list[str] expand: Indicates a field in the response which should be expanded.
:return: CoachingNotificationList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_number', 'page_size', 'expand']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_coaching_notifications" % key
)
params[key] = val
del params['kwargs']
resource_path = '/api/v2/coaching/notifications'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page_number' in params:
query_params['pageNumber'] = params['page_number']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'expand' in params:
query_params['expand'] = params['expand']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CoachingNotificationList',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_coaching_appointment(self, appointment_id, body, **kwargs):
"""
Update an existing appointment
Permission not required if you are the creator or facilitator of the appointment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_coaching_appointment(appointment_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str appointment_id: The ID of the coaching appointment. (required)
:param UpdateCoachingAppointmentRequest body: The new version of the appointment (required)
:return: CoachingAppointmentResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['appointment_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_coaching_appointment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'appointment_id' is set
if ('appointment_id' not in params) or (params['appointment_id'] is None):
raise ValueError("Missing the required parameter `appointment_id` when calling `patch_coaching_appointment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_coaching_appointment`")
resource_path = '/api/v2/coaching/appointments/{appointmentId}'.replace('{format}', 'json')
path_params = {}
if 'appointment_id' in params:
path_params['appointmentId'] = params['appointment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CoachingAppointmentResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_coaching_appointment_annotation(self, appointment_id, annotation_id, body, **kwargs):
"""
Update an existing annotation.
You must have the appropriate permission for the type of annotation you are updating. Permission not required if you are the creator or facilitator of the appointment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_coaching_appointment_annotation(appointment_id, annotation_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str appointment_id: The ID of the coaching appointment. (required)
:param str annotation_id: The ID of the annotation. (required)
:param CoachingAnnotation body: The new version of the annotation (required)
:return: CoachingAnnotation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['appointment_id', 'annotation_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_coaching_appointment_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'appointment_id' is set
if ('appointment_id' not in params) or (params['appointment_id'] is None):
raise ValueError("Missing the required parameter `appointment_id` when calling `patch_coaching_appointment_annotation`")
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params) or (params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `patch_coaching_appointment_annotation`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_coaching_appointment_annotation`")
resource_path = '/api/v2/coaching/appointments/{appointmentId}/annotations/{annotationId}'.replace('{format}', 'json')
path_params = {}
if 'appointment_id' in params:
path_params['appointmentId'] = params['appointment_id']
if 'annotation_id' in params:
path_params['annotationId'] = params['annotation_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CoachingAnnotation',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_coaching_appointment_status(self, appointment_id, body, **kwargs):
"""
Update the status of a coaching appointment
Permission not required if you are an attendee, creator or facilitator of the appointment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_coaching_appointment_status(appointment_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str appointment_id: The ID of the coaching appointment. (required)
:param CoachingAppointmentStatusRequest body: Updated status of the coaching appointment (required)
:return: CoachingAppointmentStatusResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['appointment_id', 'body']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_coaching_appointment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'appointment_id' is set
if ('appointment_id' not in params) or (params['appointment_id'] is None):
raise ValueError("Missing the required parameter `appointment_id` when calling `patch_coaching_appointment_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_coaching_appointment_status`")
resource_path = '/api/v2/coaching/appointments/{appointmentId}/status'.replace('{format}', 'json')
path_params = {}
if 'appointment_id' in params:
path_params['appointmentId'] = params['appointment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CoachingAppointmentStatusResponse',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def patch_coaching_notification(self, notification_id, body, **kwargs):
"""
Update an existing notification.
Can only update your own notifications.
This method makes a synchronous HTTP request | |
= None
self.__pRkd = None
self.__pthres = None
self.__pslope = None
@Property
def thin ():
    "Thinning: Subsample chains to reduce autocorrelation"
    # NOTE(review): @Property appears to be a custom decorator that builds a
    # property object from the nested fget/fset functions -- confirm its
    # definition elsewhere in this module before restructuring.
    def fget (self):
        # Current thinning factor (every thin-th sample is retained).
        return self.__thin
    def fset (self,t):
        self.__thin = t
        # Set all values that depend on thin to None. This way, they are recomputed
        # on access
        self.__meanestimate = None
        self.__meandeviance = None
        self.__pRpd = None
        self.__pRkd = None
        self.__pthres = None
        self.__pslope = None
@Property
def mcRpd ():
    "Monte Carlo samples of posterior correlation between model predictions and data"
    def fget (self):
        """Get samples from the posterior distribution of correlation between model prediction and deviance residuals"""
        if self.__pRpd is None:
            # pRpd is currently undefined
            if len(self.__mcmc_chains) > 0:
                # We have samples ~> recompute the correlations
                self.__recomputeCorrelationsAndThresholds()
            else:
                # No chains sampled yet (Python 2 raise syntax).
                raise NosamplesError, "Samples from the posterior have not yet been drawn"
        return self.__pRpd
    def fset (self, v):
        # Read-only property: assignment is deliberately ignored.
        pass
@Property
def mcRkd ():
    "Monte Carlo samples of posterior correlation between block index and data"
    def fget (self):
        """Get samples from the posterior distribution of correlation between block index and deviance residuals"""
        if self.__pRkd is None:
            # pRkd is currently undefined
            if len(self.__mcmc_chains) > 0:
                # We have samples ~> recompute the correlations
                self.__recomputeCorrelationsAndThresholds()
            else:
                # No chains sampled yet (Python 2 raise syntax).
                raise NosamplesError, "Samples from the posterior have not yet been drawn"
        return self.__pRkd
    def fset (self, v):
        # Read-only property: assignment is deliberately ignored.
        pass
# Plain read-only properties delegating to getter methods defined elsewhere
# in the class (getsamples, getmcdeviance, getppdata, ...).
mcestimates = property ( fget=getsamples, doc="Monte Carlo samples from the posterior distribution of parameters" )
mcdeviance = property ( fget=getmcdeviance , doc="Deviances of monte carlo samples from the posterior" )
posterior_predictive = property ( fget=getppdata, doc="Posterior predictive data associated with the MCMC samples" )
ppdeviance = property ( fget=getppdeviance, doc="Deviances associated with the posterior predictive data" )
ppRpd = property ( fget=getppRpd, doc="Correlations between psychometric function and deviance residuals associated with posterior predictive data" )
ppRkd = property ( fget=getppRkd, doc="Correlations between block index and deviance residuals associated with posterior predictive data" )
@Property
def mcthres ():
    "Monte Carlo Samples from the posterior distribution of thresholds"
    def fget (self):
        """Get samples of the posterior distribution of thresholds"""
        if self.__pthres is None:
            # pthres is currently undefined
            if len(self.__mcmc_chains) > 0:
                # We have samples ~> recompute the thresholds
                self.__recomputeCorrelationsAndThresholds()
            else:
                # No chains sampled yet (Python 2 raise syntax).
                raise NosamplesError, "Samples from the posterior have not yet been drawn"
        return self.__pthres
    def fset (self, t):
        # Read-only property: assignment is deliberately ignored.
        pass
@Property
def mcslope ():
    "Monte Carlo Samples from the posterior distribution of slopes"
    def fget (self):
        """Get samples of the posterior distribution of slopes"""
        if self.__pslope is None:
            # pslope is currently undefined
            if len(self.__mcmc_chains) > 0:
                # We have samples ~> recompute the slope
                self.__recomputeCorrelationsAndThresholds()
            else:
                # No chains sampled yet (Python 2 raise syntax).
                raise NosamplesError, "Samples from the posterior have not yet been drawn"
        return self.__pslope
    def fset (self, t):
        # Read-only property: assignment is deliberately ignored.
        pass
@Property
def evidence ():
    """model evidence or marginal likelihood
    Model evidence is typically given as the integral of the likelihood over the parameter space.
    We replace the integral by a discrete sum over samples, such that we have
    E = 1/N sum P(D|theta)
    Model evidence is typically used in Bayesian model selection: If E1 is the evidence for model
    1 and E2 is the evidence for model 2, then the ratio E1/E2 can be interpreted as "how much more
    evidence is there for model 2 than for model 1".
    """
    def fget (self):
        dev = self.mcdeviance
        # Deviance is -2*log(likelihood), so exp(-0.5*dev) recovers the
        # likelihood of each posterior sample; the mean approximates E.
        return N.exp(-0.5*dev).mean()
@Property
def nullevidence ():
    """model evidence for the corresponding null model
    This can be used for model selection: model evidence devided by null evidence gives the Bayes Factor
    for the comparison of the model agains the null model. This can be interpreted as "how much more
    probable is the given psychometric function than the null model for the present data. Also see the
    documentation for the evidence property.
    """
    def fget (self):
        # The null deviance can be directly calculated
        # Pooled trial and success counts across all blocks.
        n = self.data[:,2].sum()
        k = self.data[:,1].sum()
        alpha,beta = 1.,1. # flat prior for the null model
        fbeta = special.beta
        # Closed-form marginal likelihood of a constant-probability
        # (Bernoulli) model under the Beta(alpha,beta) prior.
        return fbeta(k+alpha,n-k+beta) / ( (n+1)*fbeta(k+1,n-k+1) * fbeta(alpha,beta) )
@Property
def pD ():
    """effective number of parameters"""
    def fget ( self ):
        # pD = mean posterior deviance minus deviance at the point estimate.
        return self.mcdeviance.mean()-self.deviance
@Property
def DIC ():
    """Deviance information criterion
    This is an information criterion based on the posterior distribution of deviance.
    In contrast, to other information criteria, the deviance information criterion
    determines the effective number of free parameters from the posterior distribution.
    """
    def fget ( self ):
        meandev = self.mcdeviance.mean()
        # DIC = 2*E[D] - D(point estimate) = E[D] + pD.
        return 2*meandev-self.deviance
@Property
def farstart ():
    """A proper starting value for the Rhat statistic
    This is a starting value for the mcmc process, that is relatively far away from the posterior density.
    In order to have a reasonably interpretable Rhat statistic. There should be multiple chains and these chains
    should have overdispersed starting values. farstart will always correspond to an overdispersed starting value.
    """
    def fget ( self ):
        # Randomly choose the 2.5% or 97.5% posterior percentile ...
        k = N.random.randint(2)
        # ... for one of the first two parameters; the rest stays at the MAP.
        # NOTE(review): randint(2) only ever perturbs parameter 0 or 1 --
        # confirm this is intended for models with more parameters.
        l = N.random.randint(2)
        x = self.mapestimate
        # NOTE(review): x aliases self.mapestimate, so the assignment below
        # mutates the stored MAP estimate in place unless mapestimate
        # returns a fresh copy -- verify.
        x[l] = p.prctile ( self.mcestimates[:,l], (2.5,97.5)[k] )
        return x
############################################
# Private methods
def __recomputeCorrelationsAndThresholds ( self ):
    """This method is called whenever the sample basis from the
    posterior changes. This can have three reasons:
    - burnin: the burnin is changed resulting in samples being
        added or removed at the beginning of each chain
    - thin: the thinning is changed resulting in samples being
        discarded from within the chains
    - sample: an additional chain is acquired. In this case,
        a large number of samples is added.
    """
    samples = self.getsamples()
    # Allocate double-precision result arrays, one row per retained sample.
    self.__pRpd = N.zeros(samples.shape[0],'d')
    self.__pRkd = N.zeros(samples.shape[0],'d')
    self.__pthres = N.zeros((samples.shape[0],self.Ncuts),'d')
    self.__pslope = N.zeros((samples.shape[0],self.Ncuts),'d')
    # Name-mangled write to the PsiInference base-class influence attribute.
    self._PsiInference__infl = N.zeros(self.data.shape[0], 'd' )
    for k,theta in enumerate(samples):
        # Thresholds at every requested cut, and slopes at those thresholds.
        self.__pthres[k,:] = [self._pmf.getThres ( theta, c ) for c in self.cuts]
        self.__pslope[k,:] = [self._pmf.getSlope ( theta, th ) for th in self.__pthres[k,:]]
        # Correlations of deviance residuals with model prediction (Rpd)
        # and with block index (Rkd) for this sample.
        dr = self._pmf.getDevianceResiduals ( theta, self._data )
        self.__pRpd[k] = self._pmf.getRpd ( dr, theta, self._data )
        self.__pRkd[k] = self._pmf.getRkd ( dr, self._data )
    # Influence diagnostics from per-chain log posterior ratios, restricted
    # to the retained (post-burnin, thinned) samples of every chain.
    lpr = []
    for l in self.__mcmc_logposterior_ratios:
        lpr.append(l[self.burnin::self.thin,:])
    lpr = N.concatenate ( lpr, 0 )
    self._PsiInference__infl = -N.mean(lpr,0) + N.log(N.mean(N.exp(lpr),0))
def __determineoptimalsampling ( self, noptimizations=10, verbose=False, newstyle=False ):
"""Determine optimal sampling parameters using the Raftery&Lewis (1995) procedure
Automatically set burnin,thin,nsamples.
In addition, an object, that contains more detailed information about the sampling
is stored in self.mcmcpars
:Parameters:
*noptimizations* :
maximum number of optimization iterations. If the same
sampling parameters are obtained before, the method
terminates earlier
*verbose* :
display status messages
"""
if newstyle:
self.__tunesampler ( noptimizations, True )
return
if noptimizations==0:
return
mcmcpars = {}
# Determine size of initial test run
if self.nsamples is None:
NN = 0
for q in self.conf:
Nmin = pygibbsit.gibbsit ( q=q )["Nmin"]
NN = max(NN,Nmin)
self.nsamples = NN
if not self._maxnsamples is None and self.nsamples > self._maxnsamples:
self.nsamples = self._maxnsamples
a = self.__roughvariance ()
# a = 0.1*self.mapestimate
# asympvar = N.diag(fisherinv)
# a = self.afac*N.sqrt(asympvar)
# print a
# chain,deviance,ppdata,ppdeviances,ppRpd,ppRkd,logpostratios = interface.mcmc ( self.data, self.mapestimate, NN, stepwidths=a, **self.model )
# a = N.sqrt(N.diag(N.cov(chain.T)))
# print a
oldburnin = 0
oldthin = 1
oldnsamples = NN
self.debug_samples = []
for n in xrange ( noptimizations ):
if self._sampler == "DefaultMCMC":
steps_or_proposal = self._proposal
else:
steps_or_proposal = a
samples,deviances,ppdata,ppdeviances,ppRpd,ppRkd,logpostratios,accept_rate = interface.mcmc (
self.data, self.mapestimate, NN, stepwidths=steps_or_proposal, sampler=self._sampler, **self.model )
print "Acceptance:",accept_rate
self.debug_samples.append ( samples )
# Check all desired thresholds
for q in self.conf:
for k in xrange ( len(self.mapestimate) ):
try:
mcmcpars = pygibbsit.gibbsit ( samples[:,k], q=q )
except IndexError:
continue
self.burnin = max ( self.burnin, mcmcpars.burnin )
self.thin = max ( self.thin, mcmcpars.thin )
self.nsamples = max ( self.nsamples, mcmcpars.Nsamples )
# Determine standard deviations of samples but don't store them in a
b = N.sqrt(N.diag(N.cov ( samples[self.burnin::self.thin].T )))
# Check whether b is good, otherwise use roughvariance again
if b.max() < 1e-10:
a = self.__roughvariance ()
else:
a = b
if verbose:
print "Burnin:",self.burnin,"Thinning:",self.thin,"Nsamples:",self.nsamples
print "Steps:",a
if self.nsamples <= oldnsamples:
# if oldburnin==self.burnin and oldthin==self.thin and oldnsamples==self.nsamples:
break
else:
oldburnin,oldthin,oldnsamples = self.burnin,self.thin,self.nsamples
if not self._maxnsamples is None and | |
'''
Manage web apps.
'''
from .. pyaz_utils import _call_az
from . import auth, config, connection, cors, deleted, deployment, hybrid_connection, identity, log, traffic_routing, vnet_integration
def create(name, plan, resource_group, assign_identity=None, deployment_container_image_name=None, deployment_local_git=None, deployment_source_branch=None, deployment_source_url=None, docker_registry_server_password=None, docker_registry_server_user=None, multicontainer_config_file=None, multicontainer_config_type=None, role=None, runtime=None, scope=None, startup_file=None, subnet=None, tags=None, vnet=None):
    '''
    Create a web app.
    Required Parameters:
    - name -- name of the new web app
    - plan -- name or resource id of the app service plan. Use 'appservice plan create' to get one
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - assign_identity -- accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. Check out help for more examples
    - deployment_container_image_name -- Container image name from Docker Hub, e.g. publisher/image-name:tag
    - deployment_local_git -- enable local git
    - deployment_source_branch -- the branch to deploy
    - deployment_source_url -- Git repository URL to link with manual integration
    - docker_registry_server_password -- The container registry server password. Required for private registries.
    - docker_registry_server_user -- the container registry server username
    - multicontainer_config_file -- Linux only. Config file for multicontainer apps. (local or remote)
    - multicontainer_config_type -- Linux only.
    - role -- Role name or id the system assigned identity will have
    - runtime -- canonicalized web runtime in the format of Framework|Version, e.g. "PHP|7.2". Allowed delimiters: "|" or ":". Use `az webapp list-runtimes` for available list
    - scope -- Scope that the system assigned identity can access
    - startup_file -- Linux only. The web's startup file
    - subnet -- Name or resource ID of the pre-existing subnet to have the webapp join. The --vnet is argument also needed if specifying subnet by name.
    - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
    - vnet -- Name or resource ID of the regional virtual network. If there are multiple vnets of the same name across different resource groups, use vnet resource id to specify which vnet to use. If vnet name is used, by default, the vnet in the same resource group as the webapp will be used. Must be used with --subnet argument.
    '''
    # BUG FIX: docker_registry_server_password previously defaulted to the
    # literal redaction placeholder `<PASSWORD>`, which is a syntax error.
    # It now defaults to None like every other optional parameter.
    # All parameters are forwarded via locals(); do not introduce local
    # variables before this call or they would be forwarded as CLI flags too.
    return _call_az("az webapp create", locals())
def up(app_service_environment=None, dryrun=None, html=None, launch_browser=None, location=None, logs=None, name=None, os_type=None, plan=None, resource_group=None, runtime=None, sku=None):
    '''
    Create a webapp and deploy code from a local workspace to the app. The command is required to run from the folder where the code is present. Current support includes Node, Python, .NET Core and ASP.NET. Node, Python apps are created as Linux apps. .Net Core, ASP.NET, and static HTML apps are created as Windows apps. Append the html flag to deploy as a static HTML app.
    Optional Parameters:
    - app_service_environment -- name of the (pre-existing) App Service Environment to deploy to. Requires an Isolated V2 sku [I1v2, I2v2, I3v2]
    - dryrun -- show summary of the create and deploy operation instead of executing it
    - html -- Ignore app detection and deploy as an html app
    - launch_browser -- Launch the created app using the default browser
    - location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
    - logs -- Configure default logging required to enable viewing log stream immediately after launching the webapp
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - os_type -- Set the OS type for the app to be created.
    - plan -- name of the appserviceplan associated with the webapp
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - runtime -- canonicalized web runtime in the format of Framework|Version, e.g. "PHP|7.2". Allowed delimiters: "|" or ":". Use `az webapp list-runtimes` for available list.
    - sku -- The pricing tiers, e.g., F1(Free), D1(Shared), B1(Basic Small), B2(Basic Medium), B3(Basic Large), S1(Standard Small), P1V2(Premium V2 Small), P1V3(Premium V3 Small), P2V3(Premium V3 Medium), P3V3(Premium V3 Large), PC2 (Premium Container Small), PC3 (Premium Container Medium), PC4 (Premium Container Large), I1 (Isolated Small), I2 (Isolated Medium), I3 (Isolated Large), I1v2 (Isolated V2 Small), I2v2 (Isolated V2 Medium), I3v2 (Isolated V2 Large)
    '''
    # All parameters are forwarded via locals(); do not introduce local
    # variables before this call or they would be forwarded as CLI flags too.
    return _call_az("az webapp up", locals())
def ssh(name, resource_group, instance=None, port=None, slot=None, timeout=None):
    '''
    SSH command establishes a ssh session to the web container and developer would get a shell terminal remotely.
    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - instance -- Webapp instance to connect to. Defaults to none.
    - port -- Port for the remote connection. Default: Random available port
    - slot -- the name of the slot. Default to the productions slot if not specified
    - timeout -- timeout in seconds. Defaults to none
    '''
    # All parameters are forwarded via locals(); do not introduce local
    # variables before this call or they would be forwarded as CLI flags too.
    return _call_az("az webapp ssh", locals())
def list(resource_group=None):
    '''
    List web apps.
    Optional Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    '''
    # NOTE: shadows the builtin `list`; name is kept for CLI parity.
    # Parameters are forwarded via locals(); do not add local variables here.
    return _call_az("az webapp list", locals())
def show(name, resource_group, slot=None):
    '''
    Get the details of a web app.
    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - slot -- the name of the slot. Default to the productions slot if not specified
    '''
    # Parameters are forwarded via locals(); do not add local variables here.
    return _call_az("az webapp show", locals())
def delete(name, resource_group, keep_dns_registration=None, keep_empty_plan=None, keep_metrics=None, slot=None):
    '''
    Delete a web app.
    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - keep_dns_registration -- keep DNS registration
    - keep_empty_plan -- keep empty app service plan
    - keep_metrics -- keep app metrics
    - slot -- the name of the slot. Default to the productions slot if not specified
    '''
    # Parameters are forwarded via locals(); do not add local variables here.
    return _call_az("az webapp delete", locals())
def stop(name, resource_group, slot=None):
    '''
    Stop a web app.
    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - slot -- the name of the slot. Default to the productions slot if not specified
    '''
    # Parameters are forwarded via locals(); do not add local variables here.
    return _call_az("az webapp stop", locals())
def start(name, resource_group, slot=None):
    '''
    Start a web app.
    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    Optional Parameters:
    - slot -- the name of the slot. Default to the productions slot if not specified
    '''
    # Parameters are forwarded via locals(); do not add local variables here.
    return _call_az("az webapp start", locals())
def restart(name, resource_group, slot=None):
'''
Restart a web app.
Required Parameters:
- name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- slot -- the name of the slot. Default to the productions slot if not | |
import unittest
from data_structures.linked_list import InsertPositions, SearchPositions
from data_structures.linked_list.circular_linked import (
CircularDoublyLinkedList, CircularSinglyLinkedList
)
class CircularSinglyLinkedListTests(unittest.TestCase):
    """Behavioral tests for CircularSinglyLinkedList.

    Every mutation is followed by a circular-integrity check: the list must
    report itself circular and the tail's next pointer must wrap to the head.
    That check was previously copy-pasted after every mutation; it is now
    factored into assert_circular_integrity() (the named defect fixed here).
    """

    def setUp(self):
        self.linked_list = CircularSinglyLinkedList()
        self.data = 1

    def tearDown(self):
        pass

    def assert_circular_integrity(self):
        # Shared invariant: list is circular and tail.next wraps to head.
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(
            self.linked_list.get_head().get_data(),
            self.linked_list.get_tail().get_next().get_data()
        )

    def test_inserting_after(self):
        """Positional AFTER inserts: empty list, present, missing and middle references."""
        #: insert positionally after in an empty linked list defaults operation to beginning of list
        self.position = InsertPositions.AFTER
        self.assertEqual(0, self.linked_list.size())
        self.assertFalse(self.linked_list.is_circular())
        self.linked_list.insert(self.data, position=self.position)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item after the first item
        #: we don't expect inserted item at head of the list
        self.reference, self.data = self.data, 2
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(2, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1, 2], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item after a non-present item defaults operation to beginning of list
        #: we expect inserted item at head of the list
        self.reference, self.data = 4, 3
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(3, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([3, 1, 2], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item after an item that is in between the head and tail of the list
        #: we don't expect inserted item at head of the list
        self.reference, self.data = 1, 4
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(4, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([3, 1, 4, 2], self.linked_list.to_array())
        self.assert_circular_integrity()

    def test_inserting_before(self):
        """Positional BEFORE inserts: empty list, present, missing and middle references."""
        #: insert positionally before in an empty linked list defaults operation to beginning of list
        self.position = InsertPositions.BEFORE
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.insert(self.data, position=self.position)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item before the first item
        #: we expect inserted item at head of the list as there's only one item in the list
        self.reference, self.data = self.data, 2
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(2, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([2, 1], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item before a non-present item defaults operation to beginning of list
        #: we expect inserted item at head of the list
        self.reference, self.data = 4, 3
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(3, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([3, 2, 1], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: insert another item before an item that is in between the head and tail of the list
        #: we don't expect inserted item at head of the list
        self.reference, self.data = 1, 4
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(4, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([3, 2, 4, 1], self.linked_list.to_array())
        self.assert_circular_integrity()

    def test_inserting_at_beginning(self):
        """push() prepends and the new node becomes the head."""
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.push(self.data)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assert_circular_integrity()
        # insert another item
        self.data = 2
        self.linked_list.push(self.data)
        self.assertEqual(2, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([2, 1], self.linked_list.to_array())
        self.assert_circular_integrity()

    def test_inserting_at_end(self):
        """append() adds at the tail; duplicates are allowed."""
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.append(self.data)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assert_circular_integrity()
        # insert another item
        self.data = 2
        self.linked_list.append(self.data)
        self.assertEqual(2, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1, 2], self.linked_list.to_array())
        self.assert_circular_integrity()
        # insert a duplicate item
        self.data = 2
        self.linked_list.append(self.data)
        self.assertEqual(3, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertListEqual([1, 2, 2], self.linked_list.to_array())
        self.assert_circular_integrity()

    def test_search(self):
        """Positional search around existing and missing values."""
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.insert(self.data)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.search(self.data).get_data())
        #: no node before, inserted item at head of list
        self.assertEqual(None, self.linked_list.search(self.data, position=SearchPositions.BEFORE))
        #: in a circular linked list with one node, the head points to itself as the next node
        self.assertEqual(
            self.data,
            self.linked_list.search(self.data, position=SearchPositions.AFTER).get_data()
        )
        self.assert_circular_integrity()
        #: insert new item and search positionally
        self.previous, self.data = self.data, 2
        #: insert with default behaviour; i.e. at the beginning of the linked list
        self.linked_list.insert(self.data)
        #: search self.data, nothing should be before, self.previous should be after
        #: because the insert was done at the beginning
        self.assertEqual(None, self.linked_list.search(self.data, position=SearchPositions.BEFORE))
        self.assertEqual(
            self.previous,
            self.linked_list.search(self.data, position=SearchPositions.AFTER).get_data()
        )
        #: search self.previous, self.data should be before, and (wrapping
        #: around the circle) also after
        self.assertEqual(
            self.data,
            self.linked_list.search(self.previous, position=SearchPositions.BEFORE).get_data()
        )
        self.assertEqual(
            self.data,
            self.linked_list.search(self.previous, position=SearchPositions.AFTER).get_data()
        )
        self.assert_circular_integrity()
        #: search non-existing item
        self.assertEqual(
            None, self.linked_list.search("does not exist")
        )
        #: search for node after
        self.assertEqual(
            None, self.linked_list.search("does not exist", position=SearchPositions.AFTER)
        )
        #: search for node before
        self.assertEqual(
            None, self.linked_list.search("does not exist", position=SearchPositions.BEFORE)
        )
        self.assert_circular_integrity()

    def test_delete(self):
        """Deletion of sole node, head, tail, middle and missing values."""
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.insert(self.data)
        self.assertEqual(1, self.linked_list.size())
        self.assertListEqual([1], self.linked_list.to_array())
        self.linked_list.delete(self.data)
        self.assertEqual(0, self.linked_list.size())
        self.assertListEqual([], self.linked_list.to_array())
        self.assertEqual(None, self.linked_list.get_head())
        self.assertFalse(self.linked_list.is_circular())
        self.assertIsNone(self.linked_list.get_head())
        self.assertIsNone(self.linked_list.get_tail())
        #: use a bigger list
        self.assertEqual(0, self.linked_list.size())
        self.linked_list.push(self.data)
        self.linked_list.append(2)
        self.linked_list.insert(3, position=InsertPositions.AFTER, reference_value=self.data)
        self.linked_list.insert(4, position=InsertPositions.BEFORE, reference_value=self.data)
        self.assertEqual(4, self.linked_list.size())
        self.assertListEqual([4, 1, 3, 2], self.linked_list.to_array())
        self.assert_circular_integrity()
        #: delete a non-existent item
        self.linked_list.delete("non-existent")
        self.assertEqual(4, self.linked_list.size())
        self.assertListEqual([4, 1, 3, 2], self.linked_list.to_array())
        self.assertEqual(4, self.linked_list.get_head().get_data())
        self.assertEqual(2, self.linked_list.get_tail().get_data())
        self.assert_circular_integrity()
        #: delete 1
        self.linked_list.delete(self.data)
        self.assertEqual(3, self.linked_list.size())
        self.assertListEqual([4, 3, 2], self.linked_list.to_array())
        self.assertEqual(3, self.linked_list.get_head().get_next().get_data())
        self.assert_circular_integrity()
        #: delete 4; head of the list
        self.linked_list.delete(4)
        self.assertEqual(2, self.linked_list.size())
        self.assertListEqual([3, 2], self.linked_list.to_array())
        self.assertEqual(3, self.linked_list.get_head().get_data())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(3, self.linked_list.get_tail().get_next().get_data())
        self.assert_circular_integrity()
        #: delete 2; tail of the list
        self.linked_list.delete(2)
        self.assertEqual(1, self.linked_list.size())
        self.assertListEqual([3], self.linked_list.to_array())
        self.assertEqual(3, self.linked_list.get_head().get_data())
        self.assertEqual(3, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(3, self.linked_list.get_tail().get_next().get_data())
        self.assert_circular_integrity()
class CircularDoublyLinkedListTests(unittest.TestCase):
def setUp(self):
    # Fresh empty doubly linked list and a default payload for each test.
    self.linked_list = CircularDoublyLinkedList()
    self.data = 1
def tearDown(self):
    # Nothing to clean up: each test builds its own list in setUp.
    pass
    def test_inserting_after(self):
        """Verify positional AFTER inserts on a circular doubly linked list.

        Covers four scenarios: insert into an empty list, insert after the
        only element, insert after a non-present reference (which defaults
        to inserting at the head), and insert after a middle element.  After
        every insert the head/tail data, both next/previous links, the array
        projection and circularity (tail.next wraps to head) are asserted.
        """
        #: insert positionally after in an empty linked list defaults operation to beginning of list
        self.position = InsertPositions.AFTER
        self.assertEqual(0, self.linked_list.size())
        self.assertEqual(None, self.linked_list.get_head())
        self.assertEqual(None, self.linked_list.get_tail())
        self.linked_list.insert(self.data, position=self.position)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(self.data, self.linked_list.get_tail().get_data())
        #: single node: head and tail point at each other in both directions
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_next())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_previous())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(
            self.linked_list.get_head().get_data(),
            self.linked_list.get_tail().get_next().get_data()
        )
        #: insert another item after the first item
        #: we don't expect inserted item at head of the list
        self.reference, self.data = self.data, 2
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(2, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(self.reference, self.linked_list.get_head().get_data())
        self.assertEqual(self.data, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(self.data, self.linked_list.get_tail().get_data())
        self.assertEqual(self.reference, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        self.assertListEqual([1, 2], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_next())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_previous())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        #: insert another item after a non-present item; defaults operation to beginning of list
        #: we expect inserted item at head of the list
        self.reference, self.data = 4, 3
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(3, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(1, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(
            self.data, self.linked_list.get_head().get_next().get_previous().get_data()
        )
        self.assertEqual(2, self.linked_list.get_tail().get_data())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_next().get_data())
        self.assertEqual(
            1, self.linked_list.get_head().get_next().get_next().get_previous().get_data()
        )
        self.assertListEqual([3, 1, 2], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(1, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(1, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        #: insert another item after an item that is in between the head and tail of the list
        #: we don't expect inserted item at head of the list
        self.reference, self.data = 1, 4
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(4, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(1, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(3, self.linked_list.get_head().get_next().get_previous().get_data())
        self.assertEqual(self.data, self.linked_list.get_head().get_next().get_next().get_data())
        self.assertEqual(
            1, self.linked_list.get_head().get_next().get_next().get_previous().get_data()
        )
        self.assertEqual(
            2, self.linked_list.get_head().get_next().get_next().get_next().get_data()
        )
        self.assertEqual(
            self.data,
            self.linked_list.get_head().get_next().get_next().get_next().get_previous().get_data()
        )
        self.assertListEqual([3, 1, 4, 2], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(1, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(4, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
    def test_inserting_before(self):
        """Verify positional BEFORE inserts on a circular doubly linked list.

        Mirrors test_inserting_after: insert into an empty list, insert
        before the only element (new head), insert before a non-present
        reference (defaults to the head), and insert before a middle
        element.  After every insert the head/tail data, next/previous
        links, array projection and circularity are asserted.
        """
        #: insert positionally before in an empty linked list defaults operation to beginning of list
        self.position = InsertPositions.BEFORE
        self.assertEqual(0, self.linked_list.size())
        self.assertEqual(None, self.linked_list.get_head())
        self.assertEqual(None, self.linked_list.get_tail())
        self.linked_list.insert(self.data, position=self.position)
        self.assertEqual(1, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(self.data, self.linked_list.get_tail().get_data())
        #: single node: head and tail point at each other in both directions
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_next())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_previous())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        self.assertListEqual([1], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        #: insert another item before the first item
        #: we expect inserted item at head of the list as there's only one item in the list
        self.reference, self.data = self.data, 2
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(2, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(self.reference, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(self.reference, self.linked_list.get_tail().get_data())
        self.assertEqual(self.data, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        self.assertListEqual([2, 1], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_next())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_previous())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        #: insert another item before a non-present item; defaults operation to beginning of list
        #: we expect inserted item at head of the list
        self.reference, self.data = 4, 3
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(3, self.linked_list.size())
        self.assertEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(
            self.data, self.linked_list.get_head().get_next().get_previous().get_data()
        )
        self.assertEqual(1, self.linked_list.get_tail().get_data())
        self.assertEqual(1, self.linked_list.get_head().get_next().get_next().get_data())
        self.assertEqual(
            2, self.linked_list.get_head().get_next().get_next().get_previous().get_data()
        )
        self.assertListEqual([3, 2, 1], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(2, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
        #: insert another item before an item that is in between the head and tail of the list
        #: we don't expect inserted item at head of the list
        self.reference, self.data = 1, 4
        self.linked_list.insert(self.data, position=self.position, reference_value=self.reference)
        self.assertEqual(4, self.linked_list.size())
        self.assertNotEqual(self.data, self.linked_list.get_head().get_data())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(3, self.linked_list.get_head().get_next().get_previous().get_data())
        self.assertEqual(self.data, self.linked_list.get_head().get_next().get_next().get_data())
        self.assertEqual(
            2, self.linked_list.get_head().get_next().get_next().get_previous().get_data()
        )
        self.assertEqual(
            1, self.linked_list.get_head().get_next().get_next().get_next().get_data()
        )
        self.assertEqual(
            1, self.linked_list.get_tail().get_data()
        )
        self.assertEqual(
            self.data,
            self.linked_list.get_head().get_next().get_next().get_next().get_previous().get_data()
        )
        self.assertListEqual([3, 2, 4, 1], self.linked_list.to_array())
        self.assertTrue(self.linked_list.is_circular())
        self.assertEqual(self.linked_list.get_tail(), self.linked_list.get_head().get_previous())
        self.assertEqual(2, self.linked_list.get_head().get_next().get_data())
        self.assertEqual(4, self.linked_list.get_tail().get_previous().get_data())
        self.assertEqual(self.linked_list.get_head(), self.linked_list.get_tail().get_next())
def test_inserting_at_beginning(self):
self.assertEqual(0, self.linked_list.size())
self.assertEqual(None, self.linked_list.get_head())
self.assertEqual(None, self.linked_list.get_tail())
self.linked_list.push(self.data)
self.assertEqual(1, | |
presses have been made
        Pauses program execution in the meantime.
Example:
res = bits.statusBoxWaitN(5)
will suspend all other activity until 5 button presses have been
recorded and will then return a list of Dicts containing the 5 results.
Results can be accessed as follows:
structure::
res[0].dir, res[0].button, res[0].time
or dictionary::
res[0]['dir'], res[0]['button'], res[0]['time']
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
if self.noComms:
return
while self.statusQ.qsize() < N:
continue # ie loop
return self.getStatusBoxResponses(N)
def statusBoxWait(self):
"""Waits until (at least) one
of RTBox style key presses have been made
Pauses program execution in mean time.
Example:
res = bits.statusBoxWait()
will suspend all other activity until 1 button press has been
recorded and will then return a dict / strcuture containing results.
Results can be accessed as follows:
structure
res.dir, res.button, res.time
or dictionary
res['dir'], res['button'], res['time']
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also DBits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
if self.noComms:
return
while self.statusQ.empty():
continue # ie loop
return self.getStatusBoxResponse()
#====================================================================#
# Helper function to run in its own thread detecting statusBox #
# events and putting them in a queue #
#====================================================================#
    def _statusBox(self):
        """Worker loop that turns device status reports into button events.

        Should not normally be called by the user: it is run in its own
        thread via self.statusBoxEnable() and keeps reading from the serial
        link until self.statusBoxEnd becomes True (set from another thread,
        e.g. by statusBoxDisable()).

        Each '#sample' status line is compared to the previously seen one
        (kept in ``log``).  A digital input change, or an analog input
        change exceeding self.statusBoxThreshold, on a channel that is
        mapped by self.statusButtonMap (< 24) is packaged as a button event
        and put on self.statusQ, subject to the 'up'/'down' directions
        selected in self.statusBoxMode.  Touch events and unrecognised
        lines are skipped with a warning.

        Finishes by telling the CRS device to stop reporting, confirming
        shutdown via self.statusBoxEnd, and flushing the link.
        """
        # Continue reading data until statusBoxEnd is set.
        # Note: when used in a thread, statusBoxEnd can be set from outside this function.
        firstIn = True # we do something different for the very first status entry
        while (self.statusBoxEnd == False):
            self.com.timeout = 0.01
            raw=""
            nChars = self._inWaiting()
            if nChars >= self._statusSize: # we may have a status report
                # use self.com.read() to get exact number of chars
                raw = self.com.read(nChars)
                msg=raw.decode("utf-8")
                # just in case, split message into status lines marked by CR
                lines = msg.split('\r')
                N = len(lines)
                values = [button() for i in range(N)]
                # NOTE(review): the fragment after the last '\r' is skipped —
                # presumably an incomplete line left for the next read; confirm.
                for i in range(N-1): # for each status line
                    # split line into component parts marked by ;
                    v=lines[i].split(';')
                    if v[0] == '#sample': # Check we have read a status line
                        if not firstIn: # Not first status entry
                            # Check digital inputs (17 channels starting at field 3)
                            for sourse in range(17):
                                # if input has changed and input is mapped
                                if (v[3+sourse] != log[3+sourse]
                                        and self.statusButtonMap[sourse] < 24):
                                    if ('Down' in self.statusBoxMode
                                            or 'down' in self.statusBoxMode):
                                        if int(v[3+sourse]) == 0:
                                            values[i].button = (
                                                self.statusButtonMap[sourse])
                                            values[i].time = float(v[2])
                                            values[i].dir = 'down'
                                            self.statusQ.put(values[i])
                                    if ('Up' in self.statusBoxMode
                                            or 'up' in self.statusBoxMode):
                                        if int(v[3+sourse]) == 1:
                                            values[i].button = (
                                                self.statusButtonMap[sourse])
                                            values[i].time = float(v[2])
                                            values[i].dir = 'up'
                                            self.statusQ.put(values[i])
                                # save new state of sourse
                                log[3+sourse] = v[3+sourse]
                            # Check analog inputs (6 channels after the digital ones)
                            for analog in range(6):
                                sourse = analog + 17
                                fLog = float(log[3+sourse])
                                fVal = float(v[3+sourse])
                                # if input has changed enough and input is mapped
                                if (abs(fVal - fLog)
                                        > self.statusBoxThreshold
                                        and self.statusButtonMap[sourse] < 24):
                                    if ('Down' in self.statusBoxMode
                                            or 'down' in self.statusBoxMode):
                                        # Is change of input in downward direction
                                        if fLog > fVal:
                                            values[i].button = (
                                                self.statusButtonMap[sourse])
                                            values[i].time = float(v[2])
                                            values[i].dir = 'down'
                                            self.statusQ.put(values[i])
                                    if ('Up' in self.statusBoxMode
                                            or 'up' in self.statusBoxMode):
                                        # Is change of input in upward direction
                                        if fVal > fLog:
                                            values[i].button = (
                                                self.statusButtonMap[sourse])
                                            values[i].time = float(v[2])
                                            values[i].dir = 'up'
                                            self.statusQ.put(values[i])
                                # save new state of sourse
                                log[3+sourse] = v[3+sourse]
                        else: # this is the first entry: just record the baseline
                            log = deepcopy(v) # make a copy of status entry
                            firstIn = False
                    elif v[0] == '$touch': # We've read a screen touch event by mistake.
                        warning = ("_statusBox found touch"
                                   " data on input so skipping that")
                        logging.warning(warning)
                    else: # We've read something we can't interpret.
                        warning = ("_statusBox found unknown data"
                                   " on input so skipping that")
                        logging.warning(warning)
        # clean up when stop is called.
        # Send stop signal to CRS device to shut it up.
        self._statusDisable() # Send stop signal to CRS device to shut it up.
        self.statusBoxEnd = True # Confirm that data logging has ended.
        self.flush()
#====================================================================#
# 'status' functions use BitsSharp status reporting to read the #
# digital IO, IR channels #
# and analog inputs via a separate thread #
#====================================================================#
def setStatusEventParams(self, DINBase=0b1111111111,
IRBase=0b111111,
TrigInBase=0,
ADCBase=0,
threshold=9999.99,
mode=['up','down']):
""" Sets the parameters used to determine if a status value represents
a reportable event.
DIN_base = a 10 bit binary word specifying the expected starting
values of the 10 digital input lines
IR_base = a 6 bit binary word specifying the expected starting
values of the 6 CB6 IR buttons
Trig_base = the starting value of the Trigger input
mode = a list of event types to monitor can be 'up' or 'down'
typically 'down' corresponds to a button press or when the input
is being pulled down to zero volts.
Example::
bits.setStatusEventParams(DINBase=0b1111111111,
IRBase=0b111111,
TrigInBase=0,
ADCBase=0,
threshold = 3.4,
mode = ['down'])
bits.startStatusLog()
while not event
#do some processing
continue
bits.stopStatusLog()
res=getAllStatusEvents(0)
print(bits.res.time)
This ill start the event extraction process as if DINs and IRs are all '1', Trigger is '0'
ADCs = 0 with an ADC threshold for change of 3.4 volts,
and will only register 'down' events. Here we display the time stamp of the first event.
Note that the firmware in Display++ units varies over time and some
features of this class may not work for all firmware versions.
Also Display++ units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
self.statusDINBase = DINBase # Initial values for ditgial ins
self.statusIRBase = IRBase # Initial values for CB6 IR box
self.statusTrigInBase = TrigInBase # Initial values for TrigIn
self.statusADCBase = ADCBase # Initial value for all ADCs
self.statusThreshold = threshold # Threshold for ADC events
self.statusMode = mode # Direction of events to be reported
def pollStatus(self, t=0.0001):
""" | |
"""Text Summarization related modeling class"""
from typing import List, Optional, Union
import torch
from fairseq import hub_utils
from torch import nn
from pororo.models.brainbert.BrainRoBERTa import BrainRobertaHubInterface
from pororo.tasks import download_or_load
from pororo.tasks.utils.base import (
PororoFactoryBase,
PororoGenerationBase,
PororoSimpleBase,
)
class PororoSummarizationFactory(PororoFactoryBase):
"""
Text summarization using various pretrained models
Korean (`kobart.base.ko.summary`)
- dataset: Dacon summarization corpus + AI Hub summarization corpus (1st release)
- metric: Rouge-1 (52.03), Rouge-2 (45.18), Rouge-L (49.48)
- ref: https://dacon.io/competitions/official/235671/data/
- ref: https://www.aihub.or.kr/node/9176
Korean (`kobart.base.ko.bullet`)
- dataset: Internal Corpus
- metric: Rouge-1 (8.03), Rouge-2 (2.38), Rouge-L (7.23)
Korean (`brainbert.base.ko.summary`)
- dataset: Dacon summarization corpus + AI Hub summarization corpus (1st release)
- metric: Rouge-1 (42.67), Rouge-2 (31.80), Rouge-L (43.12)
- ref: https://dacon.io/competitions/official/235671/data/
- ref: https://www.aihub.or.kr/node/9176
Notes:
        Pororo supports 3 different types of summarization, as below.
        1. Abstractive summarization : the model generates a summary in the form of a complete sentence.
        2. Bullet-point summarization : the model generates multiple summaries in the form of short phrases.
        3. Extractive summarization : the model extracts 3 important sentences from the article.
Args:
text (Union[str, List[str]]): input text to be extracted
beam (int): beam search size
temperature (float): temperature scale
top_k (int): top-K sampling vocabulary size
top_p (float): top-p sampling ratio
no_repeat_ngram_size (int): no repeat ngram size
len_penalty (float): length penalty ratio
Returns:
(str) summarized text
Examples:
>>> # text summarization task has 3 difference models
>>> summ = Pororo(task="summarization", model="abstractive", lang="ko")
>>> summ("20년 4월 8일 자로 아카이브에 올라온 뜨끈뜨끈한 논문을 찾았다. 카카오 브레인에서 한국어 자연어 처리를 위한 새로운 데이터셋을 공개했다는 내용이다. 자연어 추론(NLI)와 텍스트의 의미적 유사성(STS)는 자연어 이해(NLU)에서 핵심 과제. 영어나 다른 언어들은 데이터셋이 몇 개 있는데, 한국어로 된 NLI나 STS 공개 데이터셋이 없다. 이에 동기를 얻어 새로운 한국어 NLI와 STS 데이터 셋을 공개한다. 이전 의 접근 방식에 따라 기존의 영어 훈련 세트를 기계 번역(machine-translate)하고 develop set과 test set을 수동으로 한국어로 번역한다. 한국어 NLU에 대한 연구가 더 활성화되길 바라며, KorNLI와 KorSTS에 baseline을 설정하며, Github에 공개한다. NLI와 STS는 자연어 이해의 중심 과제들로 많이 이야기가 된다. 이에 따라 몇몇 벤치마크 데이터셋은 영어로 된 NLI와 STS를 공개했었다. 그러나 한국어 NLI와 STS 벤치마크 데이터셋은 존재하지 않았다. 대부분의 자연어 처리 연구가 사람들이 많이 쓰는 언어들을 바탕으로 연구 가 되기 때문. 유명한 한국어 NLU 데이터 셋이 전형적으로 QA나 감정 분석은 포함은 되어있는데 NLI나 STS는 아니다. 한국어로 된 공개 NLI나 STS 벤치마크 데이터셋이 없어서 이런 핵심과제에 적합한 한국어 NLU 모델 구축에 대한 관심이 부족했다고 생각한다. 이에 동기를 얻어 KorNLI와 KorSTS를 만들었다.")
'카카오 브레인에서 자연어 이해의 중심 과제들로 많이 이야기되는 한국어 자연어 처리를 위한 새로운 데이터셋인 KorNLI와 KorSTS 데이터셋을 공개했다.'
>>> summ = Pororo(task="summarization", model="bullet", lang="ko")
>>> summ("20년 4월 8일 자로 아카이브에 올라온 뜨끈뜨끈한 논문을 찾았다. 카카오 브레인에서 한국어 자연어 처리를 위한 새로운 데이터셋을 공개했다는 내용이다. 자연어 추론(NLI)와 텍스트의 의미적 유사성(STS)는 자연어 이해(NLU)에서 핵심 과제. 영어나 다른 언어들은 데이터셋이 몇 개 있는데, 한국어로 된 NLI나 STS 공개 데이터셋이 없다. 이에 동기를 얻어 새로운 한국어 NLI와 STS 데이터 셋을 공개한다. 이전 의 접근 방식에 따라 기존의 영어 훈련 세트를 기계 번역(machine-translate)하고 develop set과 test set을 수동으로 한국어로 번역한다. 한국어 NLU에 대한 연구가 더 활성화되길 바라며, KorNLI와 KorSTS에 baseline을 설정하며, Github에 공개한다. NLI와 STS는 자연어 이해의 중심 과제들로 많이 이야기가 된다. 이에 따라 몇몇 벤치마크 데이터셋은 영어로 된 NLI와 STS를 공개했었다. 그러나 한국어 NLI와 STS 벤치마크 데이터셋은 존재하지 않았다. 대부분의 자연어 처리 연구가 사람들이 많이 쓰는 언어들을 바탕으로 연구 가 되기 때문. 유명한 한국어 NLU 데이터 셋이 전형적으로 QA나 감정 분석은 포함은 되어있는데 NLI나 STS는 아니다. 한국어로 된 공개 NLI나 STS 벤치마크 데이터셋이 없어서 이런 핵심과제에 적합한 한국어 NLU 모델 구축에 대한 관심이 부족했다고 생각한다. 이에 동기를 얻어 KorNLI와 KorSTS를 만들었다.")
['KorNLI와 KorSTS에 baseline 설정', ' 새로운 NLI와 STS 데이터 셋 공개']
>>> summ = Pororo(task="summarization", model="extractive", lang="ko")
>>> summ("20년 4월 8일 자로 아카이브에 올라온 뜨끈뜨끈한 논문을 찾았다. 카카오 브레인에서 한국어 자연어 처리를 위한 새로운 데이터셋을 공개했다는 내용이다. 자연어 추론(NLI)와 텍스트의 의미적 유사성(STS)는 자연어 이해(NLU)에서 핵심 과제. 영어나 다른 언어들은 데이터셋이 몇 개 있는데, 한국어로 된 NLI나 STS 공개 데이터셋이 없다. 이에 동기를 얻어 새로운 한국어 NLI와 STS 데이터 셋을 공개한다. 이전 의 접근 방식에 따라 기존의 영어 훈련 세트를 기계 번역(machine-translate)하고 develop set과 test set을 수동으로 한국어로 번역한다. 한국어 NLU에 대한 연구가 더 활성화되길 바라며, KorNLI와 KorSTS에 baseline을 설정하며, Github에 공개한다. NLI와 STS는 자연어 이해의 중심 과제들로 많이 이야기가 된다. 이에 따라 몇몇 벤치마크 데이터셋은 영어로 된 NLI와 STS를 공개했었다. 그러나 한국어 NLI와 STS 벤치마크 데이터셋은 존재하지 않았다. 대부분의 자연어 처리 연구가 사람들이 많이 쓰는 언어들을 바탕으로 연구 가 되기 때문. 유명한 한국어 NLU 데이터 셋이 전형적으로 QA나 감정 분석은 포함은 되어있는데 NLI나 STS는 아니다. 한국어로 된 공개 NLI나 STS 벤치마크 데이터셋이 없어서 이런 핵심과제에 적합한 한국어 NLU 모델 구축에 대한 관심이 부족했다고 생각한다. 이에 동기를 얻어 KorNLI와 KorSTS를 만들었다.")
'카카오 브레인에서 한국어 자연어 처리를 위한 새로운 데이터셋을 공개했다는 내용이다. 이에 동기를 얻어 새로운 한국어 NLI와 STS 데이터 셋을 공개한다. 한국어 NLU에 대한 연구가 더 활성화되길 바라며, KorNLI와 KorSTS에 baseline을 설정하며, Github에 공개한다.'
>>> # text summarization task supports batchwise inference
>>> summ = Pororo(task="summarization", model="abstractive", lang="ko")
>>> summ([
... "목성과 토성이 약 400년 만에 가장 가까이 만났습니다. 국립과천과학관 등 천문학계에 따르면 21일 저녁 목성과 토성은 1623년 이후 397년 만에 가장 가까워졌는데요. 크리스마스 즈음까지 남서쪽 하늘을 올려다보면 목성과 토성이 가까워지는 현상을 관측할 수 있습니다. 목성의 공전주기는 11.9년, 토성의 공전주기는 29.5년인데요. 공전주기의 차이로 두 행성은 약 19.9년에 한 번 가까워집니다. 이번 근접 때 목성과 토성 사이 거리는 보름달 지름의 5분의 1 정도로 가까워졌습니다. 맨눈으로 보면 두 행성이 겹쳐져 하나의 별처럼 보이는데요. 지난 21일 이후 목성과 토성의 대근접은 2080년 3월 15일로 예측됩니다. 과천과학관 측은 우리가 대근접을 볼 수 있는 기회는 이번이 처음이자 마지막이 될 가능성이 크다라고 설명했 습니다.",
... "가수 김태연은 걸 그룹 소녀시대, 소녀시대-태티서 및 소녀시대-Oh!GG의 리더이자 메인보컬이다. 2004년 SM에서 주최한 청소년 베스트 선발 대회에서 노래짱 대상을 수상하며 SM 엔터테인먼트에 캐스팅되었다. 이후 3년간의 연습생을 거쳐 2007년 소녀시대의 멤버로 데뷔했다. 태연은 1989년 3월 9일 대한민국 전라북도 전주시 완산구에서 아버지 김종구, 어머니 김희자 사이의 1남 2녀 중 둘째로 태어났다. 가족으로는 오빠 김지웅, 여동생 김하연이 있다. 어릴 적부터 춤을 좋아했고 특히 명절 때는 친척들이 춤을 시키면 곧잘 추었다던 태연은 TV에서 보아를 보고 가수의 꿈을 갖게 되었다고 한다. 전주양지초등학교를 졸업하였고 전주양지중학교 2학년이던 2003년 SM아카데미 스타라이트 메인지방보컬과 4기에 들어가게 되면서 아버지와 함께 주말마다 전주에서 서울로 이동하며 가수의 꿈을 키웠다. 2004년에 당시 보컬 트레이너였던 더 원의 정규 2집 수록곡 〈You Bring Me Joy (Part 2)〉에 피처링으로 참여했다. 당시 만 15세였던 태연은 현재 활동하는 소속사 SM 엔터테인먼트에 들어가기 전이었다. 이후 태연은 2004년 8월에 열린 제8회 SM 청소년 베스트 선발 대회에서 노래짱 부문에 출전해 1위(대상)를 수상하였고 SM 엔터테인먼트에 정식 캐스팅되어 연습생 생활을 시작하게 되었다. 2005년 청담고등학교에 입학하였으나, 학교 측에서 연예계 활동을 용인하지 않아 전주예술고등학교 방송문화예술과로 전학하였고 2008년 졸업하면서 학교를 빛낸 공로로 공로상을 수상했다. 태연은 연습생 생활이 힘들어 숙소에서 몰래 뛰쳐나갔다가 하루 만에 다시 돌아오기도 했다고 이야기하기도 했다. 이후 SM엔터테인먼트에서 3년여의 연습생 기간을 거쳐 걸 그룹 소녀시대의 멤버로 정식 데뷔하게 되었다."
... ])
['국립과천과학관 등 천문학계에 따르면 21일 저녁 목성과 토성은 1623년 이후 397년 만에 가장 가까워졌는데 크리스마스 즈음까지 남서쪽 하늘을 올려다보면 목성과 토성이 가까워지는 현상을 관측할 수 있다.',
'가수 태연은 2004년 SM 청소년 베스트 선발 대회에서 노래짱 대상을 수상하고 SM 엔터테인먼트에 캐스팅되어 3년간의 연습생 기간을 거쳐 2007년 소녀시대의 멤버로 데뷔했다.']
>>> summ = Pororo(task="summarization", model="bullet", lang="ko")
>>> summ([
... "목성과 토성이 약 400년 만에 가장 가까이 만났습니다. 국립과천과학관 등 천문학계에 따르면 21일 저녁 목성과 토성은 1623년 이후 397년 만에 가장 가까워졌는데요. 크리스마스 즈음까지 남서쪽 하늘을 올려다보면 목성과 토성이 가까워지는 현상을 관측할 수 있습니다. 목성의 공전주기는 11.9년, 토성의 공전주기는 29.5년인데요. 공전주기의 차이로 두 행성은 약 19.9년에 한 번 가까워집니다. 이번 근접 때 목성과 토성 사이 거리는 보름달 지름의 5분의 1 정도로 가까워졌습니다. 맨눈으로 보면 두 행성이 겹쳐져 하나의 별처럼 보이는데요. 지난 21일 이후 목성과 토성의 대근접은 2080년 3월 15일로 예측됩니다. 과천과학관 측은 우리가 대근접을 볼 수 있는 기회는 이번이 처음이자 마지막이 될 가능성이 크다라고 설명했 습니다.",
... "가수 김태연은 걸 그룹 소녀시대, 소녀시대-태티서 및 소녀시대-Oh!GG의 리더이자 메인보컬이다. 2004년 SM에서 주최한 청소년 베스트 선발 대회에서 노래짱 대상을 수상하며 SM 엔터테인먼트에 캐스팅되었다. 이후 3년간의 연습생을 거쳐 2007년 소녀시대의 멤버로 데뷔했다. 태연은 1989년 3월 9일 | |
# coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class AssetControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def assign_asset_to_customer_using_post(self, customer_id, asset_id, **kwargs): # noqa: E501
"""assignAssetToCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.assign_asset_to_customer_using_post(customer_id, asset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str customer_id: customerId (required)
:param str asset_id: assetId (required)
:return: Asset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_asset_to_customer_using_post_with_http_info(customer_id, asset_id, **kwargs) # noqa: E501
else:
(data) = self.assign_asset_to_customer_using_post_with_http_info(customer_id, asset_id, **kwargs) # noqa: E501
return data
    def assign_asset_to_customer_using_post_with_http_info(self, customer_id, asset_id, **kwargs):  # noqa: E501
        """assignAssetToCustomer  # noqa: E501

        Builds and dispatches the POST /api/customer/{customerId}/asset/{assetId}
        request.  This method makes a synchronous HTTP request by default. To
        make an asynchronous HTTP request, please pass async_req=True

        >>> thread = api_pe.assign_asset_to_customer_using_post_with_http_info(customer_id, asset_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str customer_id: customerId (required)
        :param str asset_id: assetId (required)
        :return: Asset
            If the method is called asynchronously,
            returns the request thread.
        """
        # Names this endpoint accepts; generated for parity with other
        # endpoints (not consulted further in this method body).
        all_params = ['customer_id', 'asset_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the current local names (self, customer_id, asset_id,
        # kwargs, all_params), then flatten the kwargs entries into the same
        # dict.  Order matters: locals() must be captured here, before any
        # further locals are created.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'customer_id' is set
        if ('customer_id' not in params or
                params['customer_id'] is None):
            raise ValueError("Missing the required parameter `customer_id` when calling `assign_asset_to_customer_using_post`") # noqa: E501
        # verify the required parameter 'asset_id' is set
        if ('asset_id' not in params or
                params['asset_id'] is None):
            raise ValueError("Missing the required parameter `asset_id` when calling `assign_asset_to_customer_using_post`") # noqa: E501
        collection_formats = {}
        # Path placeholders substituted into the URL template below.
        path_params = {}
        if 'customer_id' in params:
            path_params['customerId'] = params['customer_id'] # noqa: E501
        if 'asset_id' in params:
            path_params['assetId'] = params['asset_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization'] # noqa: E501
        return self.api_client.call_api(
            '/api/customer/{customerId}/asset/{assetId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Asset', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def assign_asset_to_public_customer_using_post(self, asset_id, **kwargs): # noqa: E501
"""assignAssetToPublicCustomer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.assign_asset_to_public_customer_using_post(asset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str asset_id: assetId (required)
:return: Asset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.assign_asset_to_public_customer_using_post_with_http_info(asset_id, **kwargs) # noqa: E501
else:
(data) = self.assign_asset_to_public_customer_using_post_with_http_info(asset_id, **kwargs) # noqa: E501
return data
    def assign_asset_to_public_customer_using_post_with_http_info(self, asset_id, **kwargs):  # noqa: E501
        """assignAssetToPublicCustomer  # noqa: E501

        Builds and dispatches the POST /api/customer/public/asset/{assetId}
        request.  This method makes a synchronous HTTP request by default. To
        make an asynchronous HTTP request, please pass async_req=True

        >>> thread = api_pe.assign_asset_to_public_customer_using_post_with_http_info(asset_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str asset_id: assetId (required)
        :return: Asset
            If the method is called asynchronously,
            returns the request thread.
        """
        # Names this endpoint accepts; generated for parity with other
        # endpoints (not consulted further in this method body).
        all_params = ['asset_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the current local names, then flatten the kwargs entries
        # into the same dict.  Order matters: locals() must be captured here,
        # before any further locals are created.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'asset_id' is set
        if ('asset_id' not in params or
                params['asset_id'] is None):
            raise ValueError("Missing the required parameter `asset_id` when calling `assign_asset_to_public_customer_using_post`") # noqa: E501
        collection_formats = {}
        # Path placeholder substituted into the URL template below.
        path_params = {}
        if 'asset_id' in params:
            path_params['assetId'] = params['asset_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization'] # noqa: E501
        return self.api_client.call_api(
            '/api/customer/public/asset/{assetId}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Asset', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_asset_using_delete(self, asset_id, **kwargs): # noqa: E501
"""deleteAsset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.delete_asset_using_delete(asset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str asset_id: assetId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_asset_using_delete_with_http_info(asset_id, **kwargs) # noqa: E501
else:
(data) = self.delete_asset_using_delete_with_http_info(asset_id, **kwargs) # noqa: E501
return data
def delete_asset_using_delete_with_http_info(self, asset_id, **kwargs): # noqa: E501
"""deleteAsset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.delete_asset_using_delete_with_http_info(asset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str asset_id: assetId (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['asset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
# verify the required parameter 'asset_id' is set
if ('asset_id' not in params or
params['asset_id'] is None):
raise ValueError("Missing the required parameter `asset_id` when calling `delete_asset_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'asset_id' in params:
path_params['assetId'] = params['asset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/asset/{assetId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def find_by_query_using_post(self, query, **kwargs): # noqa: E501
"""findByQuery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.find_by_query_using_post(query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AssetSearchQuery query: query (required)
:return: list[Asset]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.find_by_query_using_post_with_http_info(query, **kwargs) # noqa: E501
else:
(data) = self.find_by_query_using_post_with_http_info(query, **kwargs) # noqa: E501
return data
def find_by_query_using_post_with_http_info(self, query, **kwargs): # noqa: E501
"""findByQuery # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.find_by_query_using_post_with_http_info(query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AssetSearchQuery query: query (required)
:return: list[Asset]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['query'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
# verify the required parameter 'query' is set
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `find_by_query_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'query' in params:
body_params = params['query']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
| |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from .import attestation_pb2 as attestation__pb2
from .import beacon_block_pb2 as beacon__block__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from .import validator_pb2 as validator__pb2
class BeaconNodeValidatorStub(object):
    """Beacon node validator API.

    The beacon node validator API enables a validator to connect
    and perform its obligations on the Ethereum 2.0 phase 0 beacon chain.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        prefix = '/ethereum.eth.v1alpha1.BeaconNodeValidator/'
        empty = google_dot_protobuf_dot_empty__pb2.Empty
        # (rpc name, channel factory, request serializer, response deserializer)
        rpc_table = (
            ('GetDuties', channel.unary_unary,
             validator__pb2.DutiesRequest.SerializeToString,
             validator__pb2.DutiesResponse.FromString),
            ('StreamDuties', channel.stream_stream,
             validator__pb2.DutiesRequest.SerializeToString,
             validator__pb2.DutiesResponse.FromString),
            ('DomainData', channel.unary_unary,
             validator__pb2.DomainRequest.SerializeToString,
             validator__pb2.DomainResponse.FromString),
            ('WaitForChainStart', channel.unary_stream,
             empty.SerializeToString,
             validator__pb2.ChainStartResponse.FromString),
            ('WaitForSynced', channel.unary_stream,
             empty.SerializeToString,
             validator__pb2.SyncedResponse.FromString),
            ('WaitForActivation', channel.unary_stream,
             validator__pb2.ValidatorActivationRequest.SerializeToString,
             validator__pb2.ValidatorActivationResponse.FromString),
            ('ValidatorIndex', channel.unary_unary,
             validator__pb2.ValidatorIndexRequest.SerializeToString,
             validator__pb2.ValidatorIndexResponse.FromString),
            ('ValidatorStatus', channel.unary_unary,
             validator__pb2.ValidatorStatusRequest.SerializeToString,
             validator__pb2.ValidatorStatusResponse.FromString),
            ('GetBlock', channel.unary_unary,
             validator__pb2.BlockRequest.SerializeToString,
             beacon__block__pb2.BeaconBlock.FromString),
            ('ProposeBlock', channel.unary_unary,
             beacon__block__pb2.SignedBeaconBlock.SerializeToString,
             validator__pb2.ProposeResponse.FromString),
            ('GetAttestationData', channel.unary_unary,
             validator__pb2.AttestationDataRequest.SerializeToString,
             attestation__pb2.AttestationData.FromString),
            ('ProposeAttestation', channel.unary_unary,
             attestation__pb2.Attestation.SerializeToString,
             validator__pb2.AttestResponse.FromString),
            ('SubmitAggregateSelectionProof', channel.unary_unary,
             validator__pb2.AggregateSelectionRequest.SerializeToString,
             validator__pb2.AggregateSelectionResponse.FromString),
            ('SubmitSignedAggregateSelectionProof', channel.unary_unary,
             validator__pb2.SignedAggregateSubmitRequest.SerializeToString,
             validator__pb2.SignedAggregateSubmitResponse.FromString),
            ('ProposeExit', channel.unary_unary,
             beacon__block__pb2.SignedVoluntaryExit.SerializeToString,
             empty.FromString),
            ('SubscribeCommitteeSubnets', channel.unary_unary,
             validator__pb2.CommitteeSubnetsSubscribeRequest.SerializeToString,
             empty.FromString),
        )
        # Register one callable attribute per RPC, exactly as the generated
        # per-line assignments did.
        for name, factory, serializer, deserializer in rpc_table:
            setattr(self, name, factory(
                prefix + name,
                request_serializer=serializer,
                response_deserializer=deserializer,
            ))
class BeaconNodeValidatorServicer(object):
    """Beacon node validator API.

    The beacon node validator API enables a validator to connect
    and perform its obligations on the Ethereum 2.0 phase 0 beacon chain.
    """

    def _unimplemented(self, context):
        # Shared failure path: every handler below must be overridden by a
        # concrete servicer implementation.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetDuties(self, request, context):
        """Retrieve validator duties for the requested validators.

        Duties are either Proposer (creates a beacon chain block) or
        Attester (part of a committee signing off on a block while cross
        linking to a recent shard block). The server returns the actions
        to be performed by the validators for a given epoch. Duties should
        be polled every epoch, and ideally every slot, since a chain reorg
        of >MIN_SEED_LOOKAHEAD can change them.
        """
        self._unimplemented(context)

    def StreamDuties(self, request_iterator, context):
        """Stream validator duties (Proposer/Attester roles) for the
        requested validators.
        """
        self._unimplemented(context)

    def DomainData(self, request, context):
        """Fetch the current BLS signature domain version information from
        the running beacon node's state, used when validators sign blocks
        and attestations per their duty.
        """
        self._unimplemented(context)

    def WaitForChainStart(self, request, context):
        """Query the Validator Deposit Contract logs on the Ethereum
        proof-of-work chain to verify the beacon chain has started. If it
        has not, a server-side stream updates the client until it is ready.
        This RPC is deprecated; use WaitForSynced instead.
        """
        self._unimplemented(context)

    def WaitForSynced(self, request, context):
        """Check whether the beacon node is synced and ready to talk to the
        validator; if not, a server-side stream updates the validator
        client until the beacon chain is ready.
        """
        self._unimplemented(context)

    def WaitForActivation(self, request, context):
        """Check whether the given validator public keys are in the active
        validator registry of the current beacon state. If none are active
        yet, a server-side stream responds until at least one corresponding
        validator activates.
        """
        self._unimplemented(context)

    def ValidatorIndex(self, request, context):
        """Retrieve a validator's index location in the beacon state's
        validator registry by public key. Returns NOT_FOUND if no index
        exists for the requested public key.
        """
        self._unimplemented(context)

    def ValidatorStatus(self, request, context):
        """Return a validator's status for the current epoch, looked up by
        public key or validator index. One of: DEPOSITED, PENDING, ACTIVE,
        EXITING, EXITED, SLASHING, UNKNOWN_STATUS.
        """
        self._unimplemented(context)

    def GetBlock(self, request, context):
        """Retrieve the latest valid beacon block to be proposed, without a
        proposer signature, filled with all the data the proposer needs to
        sign.
        """
        self._unimplemented(context)

    def ProposeBlock(self, request, context):
        """Send a newly signed beacon block to the beacon node, which is
        expected to validate and process it into its state so it can be
        included in the beacon chain.
        """
        self._unimplemented(context)

    def GetAttestationData(self, request, context):
        """Retrieve the latest valid attestation data representing the
        correct vote for the head of the beacon chain.
        """
        self._unimplemented(context)

    def ProposeAttestation(self, request, context):
        """Send a newly signed attestation to the beacon node, which is
        expected to validate and publish it on the appropriate committee
        subnet.
        """
        self._unimplemented(context)

    def SubmitAggregateSelectionProof(self, request, context):
        """Submit a selection proof so the beacon node aggregates all
        matching wire attestations with the same data root; the node
        responds with an aggregate-and-proof object for the validator to
        sign.
        """
        self._unimplemented(context)

    def SubmitSignedAggregateSelectionProof(self, request, context):
        """Submit a signed aggregate-and-proof object; the beacon node will
        broadcast the signed aggregated attestation and proof.
        """
        self._unimplemented(context)

    def ProposeExit(self, request, context):
        """Propose leaving the list of active validators. The beacon node is
        expected to validate the request and make it available for
        inclusion in the next proposed block.
        """
        self._unimplemented(context)

    def SubscribeCommitteeSubnets(self, request, context):
        """Subscribe the beacon node to particular committee ID subnets
        given the validator's duty, so nodes serving attesters can find
        persistent peers to publish attestations and nodes serving
        aggregators can join the subnet.
        """
        self._unimplemented(context)
def add_BeaconNodeValidatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetDuties': grpc.unary_unary_rpc_method_handler(
servicer.GetDuties,
request_deserializer=validator__pb2.DutiesRequest.FromString,
response_serializer=validator__pb2.DutiesResponse.SerializeToString,
),
'StreamDuties': grpc.stream_stream_rpc_method_handler(
servicer.StreamDuties,
request_deserializer=validator__pb2.DutiesRequest.FromString,
response_serializer=validator__pb2.DutiesResponse.SerializeToString,
),
'DomainData': grpc.unary_unary_rpc_method_handler(
servicer.DomainData,
request_deserializer=validator__pb2.DomainRequest.FromString,
response_serializer=validator__pb2.DomainResponse.SerializeToString,
),
'WaitForChainStart': grpc.unary_stream_rpc_method_handler(
servicer.WaitForChainStart,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=validator__pb2.ChainStartResponse.SerializeToString,
),
'WaitForSynced': grpc.unary_stream_rpc_method_handler(
servicer.WaitForSynced,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=validator__pb2.SyncedResponse.SerializeToString,
),
'WaitForActivation': grpc.unary_stream_rpc_method_handler(
servicer.WaitForActivation,
request_deserializer=validator__pb2.ValidatorActivationRequest.FromString,
response_serializer=validator__pb2.ValidatorActivationResponse.SerializeToString,
),
'ValidatorIndex': grpc.unary_unary_rpc_method_handler(
servicer.ValidatorIndex,
request_deserializer=validator__pb2.ValidatorIndexRequest.FromString,
response_serializer=validator__pb2.ValidatorIndexResponse.SerializeToString,
),
| |
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Module with class definitions for Sequence, Gene, mRNA and Transcript
by <NAME>
<EMAIL>
Last update 3/10/2022
"""
class Sequence():
    """A DNA sequence with a type, a coordinate range and an orientation."""

    def __init__(self, seq_type, seq_range, orientation):
        """Sequence class constructor.

        Parameters
        ----------
        seq_type : String
            Description of sequence type (gene, mrna, etc).
        seq_range : Tuple
            Tuple containing sequence start and end.
        orientation : String
            '+' for the sense strand, '-' for the antisense strand.
        """
        self.ID = ''
        self.seq_type = seq_type
        self.seq_range = seq_range
        self.parent = ''
        self.geneID = ''
        self.chromosome = ''
        self.orientation = orientation
        self.repeats = []

    def __eq__(self, other_sequence):
        """Return True when both sequences span the same (start, end) range."""
        return self.seq_range == other_sequence.get_seq_range()

    def __str__(self):
        """Return the (start, end) range rendered as a string."""
        return str(self.seq_range)

    def add_repeat(self, repeat_seq):
        """Record a repeat sequence found within this sequence."""
        self.repeats.append(repeat_seq)

    def get_chromosome(self):
        """Return the chromosome identification."""
        return self.chromosome

    def get_distance(self, another_sequence):
        """Return the distance from another_sequence to this sequence's ends.

        The distance is measured from the start of *another_sequence*
        (from its end, on the antisense strand) to whichever of this
        sequence's 5' or 3' ends is closer.

        Parameters
        ----------
        another_sequence : Sequence
            Sequence instance to measure against.

        Returns
        -------
        Int
            Positive when the other sequence is closer to this sequence's
            5' start, negative when it is closer to the 3' end.
        """
        other_start, other_end = another_sequence.get_seq_range()
        if self.orientation == '-':
            # On the antisense strand the stored range is read in reverse:
            # the roles of start and end swap, and the other sequence's end
            # becomes the relevant reference point.
            end, start = self.seq_range
            position = other_end
        else:
            start, end = self.seq_range
            position = other_start
        dist_to_start = abs(position - start) + 1
        dist_to_end = abs(end - position) + 1
        return dist_to_start if dist_to_start <= dist_to_end else -dist_to_end

    def get_geneID(self):
        """Return the geneID."""
        return self.geneID

    def get_ID(self):
        """Return the sequence ID."""
        return self.ID

    def get_orientation(self):
        """Return the orientation: '+' (sense) or '-' (antisense)."""
        return self.orientation

    def get_parent(self):
        """Return the parent sequence ID."""
        return self.parent

    def get_repeats(self):
        """Return the list of repeat Sequence instances found here."""
        return self.repeats

    def get_seq_range(self):
        """Return the (start, end) tuple of the sequence."""
        return self.seq_range

    def get_seq_type(self):
        """Return the sequence type description (mRNA, gene, etc)."""
        return self.seq_type

    def in_sequence(self, position):
        """Return True when *position* falls within the sequence range
        (boundaries included).
        """
        start, end = self.seq_range
        return start <= position <= end

    def is_subsequence(self, other_sequence):
        """Return True when *other_sequence* lies inside this sequence.

        Both sequences must share the same orientation and the other
        sequence's range must fall within this sequence's start and end.
        """
        if self.orientation != other_sequence.get_orientation():
            return False
        other_start, other_end = other_sequence.get_seq_range()
        start, end = self.seq_range
        return start <= other_start and other_end <= end

    def set_orientation(self, orientation):
        """Set the orientation: '+' (sense) or '-' (antisense)."""
        self.orientation = orientation

    def set_chromosome(self, chromosome):
        """Set the chromosome ID where the sequence is found."""
        self.chromosome = chromosome

    def set_ID(self, ID):
        """Set the sequence identification."""
        self.ID = ID

    def set_geneID(self, geneID):
        """Set the geneID of this sequence."""
        self.geneID = geneID

    def set_parent(self, parent):
        """Set the parent sequence ID."""
        self.parent = parent
class Transcript(Sequence):
    """A transcript: a Sequence that additionally tracks its exons."""

    def __init__(self, seq_type, seq_range, orientation):
        """Transcript class constructor.

        Parameters
        ----------
        seq_type : String
            Description of sequence type (gene, mrna, etc).
        seq_range : Tuple
            Tuple containing sequence start and end.
        orientation : String
            '+' for the sense strand, '-' for the antisense strand.
        """
        super().__init__(seq_type, seq_range, orientation)
        self.exons = []

    def __str__(self):
        """Return type, ID, range and every exon range on one line."""
        start, end = self.seq_range
        pieces = ['{:19} {:19} {:10} {:10} exons: '.format(
            self.seq_type, self.ID, start, end)]
        pieces.extend('{}-{} '.format(*exon.get_seq_range())
                      for exon in self.exons)
        return ''.join(pieces)

    def add_exon(self, exon):
        """Append an exon Sequence to the list of exons."""
        self.exons.append(exon)

    def get_exons(self):
        """Return the list of exon sequences in the transcript."""
        return self.exons
class Gene(Sequence):
    def __init__(self, seq_type, seq_range, orientation):
        """Gene class constructor.
        Parameters
        ----------
        seq_type : String
            Description of sequence type (gene, mrna, etc).
        seq_range : Tuple
            Tuple containing sequence start and end.
        orientation : String
            Sequence orientation can be '+' for sense strand and '-' for
            antisense strand.
        Returns
        -------
        None.
        """
        super().__init__(seq_type, seq_range, orientation)
        # NOTE(review): orientation is already assigned by
        # Sequence.__init__; this reassignment is redundant but harmless.
        self.orientation = orientation
        # Gene name (set later via set_name).
        self.name = ''
        # Exon sequences of the gene.
        self.exons = []
        # Unspliced (exon and intron) sequences of the gene.
        self.unspliced = []
        # Transcript instances keyed by transcript ID.
        self.transcripts = {}
        # Repeat sequences found in the gene.
        self.repeats = []
        # Repeat sequences found in the gene's mRNAs.
        self.mRNA_repeats = []
        # mRNA instances of the gene.
        self.mRNAs = []
def __str__(self):
"""String representation of gene instance.
Returns
-------
line : String
String representation of gene regarding geneID, name, sequence type,
range, orientation and chromosome.
"""
start, end = self.seq_range
line = '{:10} {:12} {:8} {:<10} {:<10} {} {:<12} '.format(self.geneID,
self.name,
self.seq_type,
start, end,
self.orientation,
self.chromosome)
return line
def get_data_dict(self):
"""Get data in the Gene instance as dictionary.
Method that collects all data in the Gene instance. This includes each
Transcript and mRNA and their derived exons, UTRs and CDS. This data is
used to generate the Json output file.
Returns
-------
gene_dict : Dictionary
Dictionary with all gene data.
"""
gene_dict = {'geneID': self.geneID, 'Name': self.name,
'seq_type': self.seq_type, 'seq_range': self.seq_range,
'orientation': self.orientation,
'chromosome': self.chromosome, 'transcripts': {}}
transcripts = self.get_transcripts()
# fetch each transcript data
for ID in transcripts:
transcript = transcripts[ID]
transcript_dict = {}
transcript_dict['seq_type'] = transcript.get_seq_type()
transcript_dict['ID'] = transcript.get_ID()
transcript_dict['seq_range'] = transcript.get_seq_range()
# get exons
transcript_dict['exons'] = []
for exon in transcript.get_exons():
transcript_dict['exons'].append(exon.get_seq_range())
# get info for mRNA
if transcript.get_seq_type() == 'mRNA':
if transcript.get_CDSs() != []:
# Calculate UTRs
transcript.get_5pUTR()
transcript.get_3pUTR()
# get all sequences as list
mrna_subseq = transcript.get_mRNA_subsequences()
for seq in mrna_subseq:
seq_type = seq.get_seq_type()
seq_range = seq.get_seq_range()
if seq_type not in transcript_dict:
transcript_dict[seq_type] = [seq_range]
else:
transcript_dict[seq_type].append(seq_range)
gene_dict['transcripts'][ID] = transcript_dict
return gene_dict
    def get_exons(self):
        """Get exon list.
        Returns
        -------
        List
            List of exon sequences recorded for this gene.
        """
        return self.exons
    def get_unspliced(self):
        """Get exon-intron (unspliced) list.
        Returns
        -------
        List
            List of unique exon and intron sequences in the gene.
        """
        return self.unspliced
    def get_name(self):
        """Get gene name.
        Returns
        -------
        String
            Gene name.
        """
        return self.name
    def set_name(self, name):
        """Set the gene name.
        Parameters
        ----------
        name : String
            Gene name.
        Returns
        -------
        None.
        """
        self.name = name
    def get_transcripts(self):
        """Get transcript dictionary.
        Returns
        -------
        Dictionary
            Dictionary containing all transcript instances by ID.
        """
        return self.transcripts
def get_mRNA_repats(self):
"""Get list of repeats found at mRNAs
| |
self,
process_code: str = None,
):
# 表单的唯一码
self.process_code = process_code
    def validate(self):
        # No field constraints to enforce for this model.
        pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.process_code is not None:
result['processCode'] = self.process_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('processCode') is not None:
self.process_code = m.get('processCode')
return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsStatField(TeaModel):
    """Schema descriptor for a statistics (stat) form field."""

    # Wire keys equal the attribute names for this model, so a single
    # tuple drives both serialization directions.
    _FIELDS = ('id', 'label', 'upper', 'unit')

    def __init__(
        self,
        id: str = None,
        label: str = None,
        upper: bool = None,
        unit: str = None,
    ):
        # id value.
        self.id = id
        # Display name.
        self.label = label
        # Upper-case flag.
        self.upper = upper
        # Unit.
        self.unit = unit

    def validate(self):
        pass

    def to_map(self):
        """Serialize the non-None fields into a plain dict."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value is not None:
                result[field] = value
        return result

    def from_map(self, m: dict = None):
        """Populate the model from a dict, skipping absent/None keys."""
        m = m or dict()
        for field in self._FIELDS:
            if m.get(field) is not None:
                setattr(self, field, m.get(field))
        return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsObjOptions(TeaModel):
    """A single selectable option value of a form component."""

    def __init__(
        self,
        value: str = None,
    ):
        # Option value.
        self.value = value

    def validate(self):
        pass

    def to_map(self):
        """Serialize to a plain dict; empty when the value is unset."""
        _map = super().to_map()
        if _map is not None:
            return _map
        if self.value is None:
            return dict()
        return {'value': self.value}

    def from_map(self, m: dict = None):
        """Populate the model from a dict, skipping absent/None keys."""
        m = m or dict()
        value = m.get('value')
        if value is not None:
            self.value = value
        return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsPush(TeaModel):
    """Attendance push configuration of a form component."""

    # Attribute name -> wire (camelCase) key, driving both directions of
    # (de)serialization.
    _FIELD_MAP = {
        'push_switch': 'pushSwitch',
        'push_tag': 'pushTag',
        'attendance_rule': 'attendanceRule',
    }

    def __init__(
        self,
        push_switch: int = None,
        push_tag: str = None,
        attendance_rule: int = None,
    ):
        # Enabled state (1 = on, 0 = off).
        self.push_switch = push_switch
        # Display name of the state.
        self.push_tag = push_tag
        # Attendance type (1 = leave, 2 = business trip, 3 = overtime,
        # 4 = out of office).
        self.attendance_rule = attendance_rule

    def validate(self):
        pass

    def to_map(self):
        """Serialize the non-None fields into a dict with camelCase keys."""
        _map = super().to_map()
        if _map is not None:
            return _map
        result = dict()
        for attr, key in self._FIELD_MAP.items():
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate the model from a dict, skipping absent/None keys."""
        m = m or dict()
        for attr, key in self._FIELD_MAP.items():
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsBehaviorLinkageTargets(TeaModel):
    """A control targeted by a behavior-linkage rule."""

    # attribute name -> camelCase wire key
    _KEYS = (('field_id', 'fieldId'), ('behavior', 'behavior'))

    def __init__(
        self,
        field_id: str = None,
        behavior: str = None,
    ):
        # Target field id.
        self.field_id = field_id
        # Behavior applied to the target field.
        self.behavior = behavior

    def validate(self):
        """No constraints to enforce on this model."""
        pass

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        payload = dict()
        for attr, key in self._KEYS:
            value = getattr(self, attr)
            if value is not None:
                payload[key] = value
        return payload

    def from_map(self, m: dict = None):
        data = m or dict()
        for attr, key in self._KEYS:
            if data.get(key) is not None:
                setattr(self, attr, data.get(key))
        return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsBehaviorLinkage(TeaModel):
    """Linkage rule: when the control takes ``value``, act on ``targets``."""

    def __init__(
        self,
        value: str = None,
        targets: List[QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsBehaviorLinkageTargets] = None,
    ):
        # Control value that triggers the linkage.
        self.value = value
        # Controls affected by the linkage.
        self.targets = targets

    def validate(self):
        for target in self.targets or []:
            if target:
                target.validate()

    def to_map(self):
        _map = super().to_map()
        if _map is not None:
            return _map
        payload = dict()
        if self.value is not None:
            payload['value'] = self.value
        # 'targets' is always emitted as a list, even when self.targets is None.
        payload['targets'] = []
        if self.targets is not None:
            for target in self.targets:
                payload['targets'].append(target.to_map() if target else None)
        return payload

    def from_map(self, m: dict = None):
        data = m or dict()
        if data.get('value') is not None:
            self.value = data.get('value')
        # Always reset to a fresh list before repopulating.
        self.targets = []
        if data.get('targets') is not None:
            for raw in data.get('targets'):
                item = QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsBehaviorLinkageTargets()
                self.targets.append(item.from_map(raw))
        return self
class QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsProps(TeaModel):
def __init__(
self,
id: str = None,
label: str = None,
biz_alias: str = None,
required: bool = None,
placeholder: str = None,
options: List[str] = None,
app_id: int = None,
duration_label: str = None,
push_to_calendar: int = None,
align: str = None,
stat_field: List[QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsStatField] = None,
hide_label: bool = None,
obj_options: List[QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsObjOptions] = None,
format: str = None,
push_to_attendance: bool = None,
label_editable_freeze: bool = None,
push: QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsPush = None,
common_biz_type: str = None,
required_editable_freeze: bool = None,
unit: str = None,
extract: bool = None,
link: str = None,
pay_enable: bool = None,
hidden: bool = None,
biz_type: str = None,
staff_status_enabled: bool = None,
action_name: str = None,
attend_type_label: str = None,
child_field_visible: bool = None,
not_print: str = None,
vertical_print: bool = None,
duration: bool = None,
holiday_options: str = None,
use_calendar: bool = None,
hidden_in_approval_detail: bool = None,
disabled: bool = None,
async_condition: bool = None,
behavior_linkage: List[QuerySchemaByProcessCodeResponseBodyResultSchemaContentItemsPropsBehaviorLinkage] = None,
show_attend_options: bool = None,
not_upper: str = None,
fields_info: str = None,
e_sign: bool = None,
main_title: str = None,
formula: str = None,
choice: int = None,
):
# 控件 id。
self.id = id
# 控件名称。
self.label = label
# 控件业务自定义别名。
self.biz_alias = biz_alias
# 是否必填。
self.required = required
# 占位符。
self.placeholder = placeholder
# 单选框选项列表。
self.options = options
# ISV 微应用 appId,用于ISV身份权限识别,ISV可获得相应数据。
self.app_id = app_id
# 兼容字段。
self.duration_label = duration_label
# 是否推送管理日历(DDDateRangeField, 1表示推送, 0表示不推送, 该属性为兼容保留)。
self.push_to_calendar = push_to_calendar
# textnote的样式,top|middle|bottom。
self.align = align
# 需要计算总和的明细组件
self.stat_field = stat_field
# 加班套件4.0新增 加班明细是否隐藏标签。
self.hide_label = hide_label
# 选项内容列表,提供给业务方更多的选择器操作。
self.obj_options = obj_options
# 时间格式(DDDateField和DDDateRangeField)。
self.format = format
# 推送到考勤, 子类型(DDSelectField)。
self.push_to_attendance = push_to_attendance
# label是否可修改 true:不可修改。
self.label_editable_freeze = label_editable_freeze
# 同步到考勤, 表示是否设置为员工状态。
self.push = push
# common field的commonBizType。
self.common_biz_type = common_biz_type
# 必填是否可修改 true:不可修改。
self.required_editable_freeze = required_editable_freeze
# 数字组件/日期区间组件单位属性。
self.unit = unit
# 套件值是否打平
self.extract = extract
# 说明文案的链接地址。
self.link = link
# 是否有支付属性。
self.pay_enable = pay_enable
# 加班套件4.0新增 加班明细是否隐藏。
self.hidden = hidden
# 业务套件类型。
self.biz_type = biz_type
# 是否开启员工状态。
self.staff_status_enabled = staff_status_enabled
# 加班套件4.0新增 加班明细名称。
self.action_name = action_name
# 请假、出差、外出、加班类型标签。
self.attend_type_label = attend_type_label
# 套件内子组件可见性。
self.child_field_visible = child_field_visible
# 是否参与打印(1表示不打印, 0表示打印)。
self.not_print = not_print
# 明细打印排版方式 false:横向 true:纵向。
self.vertical_print = vertical_print
# 是否自动计算时长。
self.duration = duration
# 兼容出勤套件类型。
self.holiday_options = holiday_options
# 是否使用考勤日历。
self.use_calendar = use_calendar
# textnote在详情页是否隐藏,true隐藏, false不隐藏
self.hidden_in_approval_detail = hidden_in_approval_detail
# 是否可编辑。
self.disabled = disabled
# 套件是否开启异步获取分条件规则,true:开启;false:不开启。
self.async_condition = async_condition
# 表单关联控件列表。
self.behavior_linkage = behavior_linkage
# 兼容出勤套件类型。
self.show_attend_options = show_attend_options
# 是否需要大写 默认是需要; 1:不需要大写, 空或者0:需要大写。
self.not_upper = not_upper
# 关联表单中的fields存储
self.fields_info = fields_info
# e签宝专用标识。
self.e_sign = e_sign
# 加班套件4.0新增 加班明细描述。
self.main_title = main_title
# 公式。
self.formula = formula
# 内部联系人choice,1表示多选,0表示单选。
self.choice = choice
def validate(self):
if self.stat_field:
for k in self.stat_field:
if k:
k.validate()
if self.obj_options:
for k in self.obj_options:
if k:
k.validate()
if self.push:
self.push.validate()
if self.behavior_linkage:
for k in self.behavior_linkage:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.id is not None:
result['id'] = self.id
if self.label is not None:
result['label'] = self.label
if self.biz_alias is not None:
result['bizAlias'] = self.biz_alias
if self.required is not None:
result['required'] = self.required
if self.placeholder is not None:
result['placeholder'] = self.placeholder
if self.options is not None:
result['options'] = self.options
if self.app_id is not None:
result['appId'] = self.app_id
if self.duration_label is not None:
result['durationLabel'] = self.duration_label
if self.push_to_calendar is not None:
result['pushToCalendar'] = self.push_to_calendar
if self.align is not None:
result['align'] = self.align
result['statField'] = []
if self.stat_field is not None:
for k in self.stat_field:
result['statField'].append(k.to_map() if k else None)
if self.hide_label is not None:
result['hideLabel'] = self.hide_label
result['objOptions'] = []
if self.obj_options is not None:
for k in self.obj_options:
result['objOptions'].append(k.to_map() if k else None)
if self.format is not None:
result['format'] = self.format
if self.push_to_attendance is not None:
result['pushToAttendance'] = self.push_to_attendance
if self.label_editable_freeze is not None:
result['labelEditableFreeze'] = self.label_editable_freeze
if self.push is not None:
result['push'] = self.push.to_map()
if self.common_biz_type is not None:
result['commonBizType'] = self.common_biz_type
if self.required_editable_freeze is not None:
result['requiredEditableFreeze'] = self.required_editable_freeze
if self.unit is not None:
result['unit'] = self.unit
if self.extract is not None:
result['extract'] = self.extract
if self.link is not None:
result['link'] = self.link
if self.pay_enable is not None:
result['payEnable'] = self.pay_enable
if self.hidden is not None:
result['hidden'] = self.hidden
if self.biz_type is not None:
result['bizType'] = self.biz_type
if self.staff_status_enabled is not | |
from . import cilast as ast
from . import visitor
from .scope import VariableInfo
from .mipsutils import datatype, op, reg
class Cil2MipsVisitor:
def __init__(self, context):
    """Create a CIL->MIPS translator.

    `context` supplies type tags (`context.tags`) and the method map
    (`context.mmap`) used when emitting virtual tables -- see visit(CILType).
    """
    self.dotdata = []  # accumulated lines of the .data segment
    self.dotcode = []  # accumulated lines of the .text segment
    self.context = context
# ======================================================================
# =[ UTILS ]============================================================
# ======================================================================
def init_str_funcs(self):
    """Emit the built-in String routines (length, concat, substr) into .text."""
    for emitter in (self.init_str_length, self.init_str_concat, self.init_str_substr):
        emitter()
def init_str_concat(self):
    """Emit the String_concat runtime routine.

    Arguments on the frame: 12($fp) is self (a boxed String), 16($fp) the
    boxed String to append.  Builds a new Int box for the combined length,
    a new String box, copies both character sequences into freshly
    allocated memory and returns the new String object in $v0.
    """
    self.emit_label('String_concat')
    self.emit_code(f'move $fp, $sp')
    self.visit(ast.CILAllocate(None, 'Int'))  # Create new Int object
    self.emit_code('move $v1 $v0')  # Save new Int Object
    self.visit(ast.CILAllocate(None, 'String'))  # Create new String object
    self.emit_code('move $t3 $v0')  # Store new String object
    self.emit_code('lw $a1 12($fp)')  # Self
    self.emit_code('lw $a2 16($fp)')  # Boxed String to concat
    self.emit_code('lw $t1 12($a1)')  # Self's length Int object
    self.emit_code('lw $t1 12($t1)')  # Self's length
    self.emit_code('lw $t2 12($a2)')  # other string's length Int object
    self.emit_code('lw $t2 12($t2)')  # other string's length
    self.emit_code('addu $t0 $t2 $t1')  # New string's length
    self.emit_code('sw $t0 12($v1)')  # Store new string's length into box
    self.emit_code('lw $a1 16($a1)')  # Unbox both strings (raw char pointers)
    self.emit_code('lw $a2 16($a2)')
    self.emit_code('addi $t0 $t0 1')  # Add space for \0
    self.allocate_memory('$t0', register=True)  # Allocate memory for new string
    self.emit_code('move $t5 $v0')  # Keep the string's reference in v0, write via t5
    self.emit_code('move $t4 $a1')  # Index for iterating the self string
    self.emit_code('addu $a1 $a1 $t1')  # self's copy limit
    self.emit_label('_strcat_copy_')
    self.emit_code('beq $t4 $a1 _end_strcat_copy_')  # No more characters to copy
    self.emit_code('lb $a0 0($t4)')  # Copy the character
    self.emit_code('sb $a0 0($t5)')
    self.emit_code('addi $t5 $t5 1')  # Advance indices
    self.emit_code('addi $t4 $t4 1')
    self.emit_code('j _strcat_copy_')
    self.emit_label('_end_strcat_copy_')
    # Copy 2nd string
    self.emit_code('move $t4 $a2')  # Index for iterating the second string
    self.emit_code('addu $a2 $a2 $t2')  # second string's copy limit
    self.emit_label('_strcat_copy_snd_')
    self.emit_code('beq $t4 $a2 _end_strcat_copy_snd_')  # No more characters to copy
    self.emit_code('lb $a0 0($t4)')  # Copy the character
    self.emit_code('sb $a0 0($t5)')
    self.emit_code('addi $t5 $t5 1')  # Advance indices
    self.emit_code('addi $t4 $t4 1')
    self.emit_code('j _strcat_copy_snd_')
    self.emit_label('_end_strcat_copy_snd_')
    self.emit_code('sb $0 0($t5)')  # End string with \0
    self.emit_code('sw $v1 12($t3)')  # New length box
    self.emit_code('sw $v0 16($t3)')  # New raw string
    self.emit_code('move $v0 $t3')  # Return new String object in $v0
    self.emit_code('jr $ra')
    self.emit_code('')
def init_str_substr(self):
    """Emit the String_substr(start, length) runtime routine.

    Frame: 12($fp) self (boxed String), 16($fp) boxed start index,
    20($fp) boxed length.  Aborts with a message when an index is
    negative or start+length exceeds the string's length; otherwise
    copies the slice into a new String object.
    NOTE(review): the result is moved into $a0 at _end_substr_copy while
    concat/type_name return in $v0 -- confirm the caller's convention.
    """
    self.emit_label('String_substr')
    self.emit_instruction(op.move, reg.fp, reg.sp)
    self.emit_instruction(op.lw, reg.t5, self.off_reg(0, reg.fp, 12))  # self
    self.emit_instruction(op.lw, reg.a1, self.off_reg(1, reg.fp, 12))  # start (boxed)
    self.emit_instruction(op.lw, reg.a1, self.off_reg(0, reg.a1, 12))  # start (raw)
    self.emit_instruction(op.lw, reg.a2, self.off_reg(2, reg.fp, 12))  # length (boxed)
    self.emit_instruction(op.lw, reg.a2, self.off_reg(0, reg.a2, 12))  # length (raw)
    self.emit_instruction(op.blt, reg.a1, '$0', '_index_negative')
    self.emit_instruction(op.blt, reg.a2, '$0', '_index_negative')
    self.emit_instruction(op.add, reg.a2, reg.a1, reg.a2)  # $a2 = end index
    self.emit_instruction(op.lw, reg.a3, self.off_reg(0, reg.t5, 12))  # self length (boxed)
    self.emit_instruction(op.lw, reg.a3, self.off_reg(0, reg.a3, 12))  # self length (raw)
    self.emit_instruction(op.bgt, reg.a2, reg.a3, '_index_out')
    # Fix: self.visit dispatches on a single node, so the CILAllocate node
    # must be constructed before the call; the original passed the class and
    # its arguments separately (self.visit(ast.CILAllocate, None, 'String')),
    # which every other call site shows is wrong.
    self.visit(ast.CILAllocate(None, 'String'))
    self.emit_instruction(op.move, reg.v1, reg.a0)
    self.visit(ast.CILAllocate(None, 'Int'))
    self.emit_instruction(op.move, reg.t0, reg.a0)
    self.emit_instruction(op.sw, reg.a2, self.off_reg(0, reg.t0, 12))
    self.allocate_memory('$a2', register=True)  # $v0 -> address of the string
    self.emit_instruction(op.sw, reg.t0, self.off_reg(0, reg.v1, 12))
    self.emit_instruction(op.sw, reg.v0, self.off_reg(1, reg.v1, 12))
    self.emit_instruction(op.move, reg.t1, reg.v0)
    self.emit_instruction(op.lw, reg.t5, self.off_reg(1, reg.t5, 12))  # raw chars of self
    self.emit_instruction(op.move, reg.t4, reg.t5)
    self.emit_instruction(op.addu, reg.t4, reg.t4, reg.a1)  # copy cursor = chars + start
    self.emit_instruction(op.addu, reg.t5, reg.t5, reg.a2)  # copy limit = chars + end
    self.emit_label('_substr_copy')
    self.emit_instruction(op.bge, reg.t4, reg.t5, '_end_substr_copy')
    self.emit_instruction(op.lb, reg.a0, self.off_reg(0, reg.t4))
    self.emit_instruction(op.sb, reg.a0, self.off_reg(0, reg.t1))
    self.emit_instruction(op.addi, reg.t1, reg.t1, 1)
    self.emit_instruction(op.addi, reg.t4, reg.t4, 1)
    self.emit_instruction(op.j, '_substr_copy')
    self.emit_label('_index_negative')
    self.emit_instruction(op.la, reg.a0, '_index_negative_msg')
    self.emit_instruction(op.b, '_subst_abort')
    self.emit_label('_index_out')
    self.emit_instruction(op.la, reg.a0, '_index_out_msg')
    self.emit_instruction(op.b, '_subst_abort')
    self.emit_label('_subst_abort')
    self.emit_instruction(op.li, reg.v0, 4)  # print the specific error message
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.la, reg.a0, '_abort_msg')
    self.emit_instruction(op.li, reg.v0, 4)
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.li, reg.v0, 10)  # exit
    self.emit_instruction(op.syscall)
    self.emit_label('_end_substr_copy')
    self.emit_instruction(op.move, reg.a0, reg.v1)
    self.emit_instruction(op.jr, reg.ra)
def init_str_length(self):
    """Emit String_length: count bytes until NUL; result returned in $a0.

    NOTE(review): the argument at 4($sp) is used directly as a char
    pointer, while concat/substr unbox a String object first -- confirm
    how callers push the argument.
    """
    self.emit_label('String_length')
    self.emit_instruction(op.lw, reg.a0, self.off_reg(1, reg.sp))  # char pointer
    self.emit_instruction(op.li, reg.a1, 0)  # running count
    self.emit_label('length_main_loop')
    self.emit_instruction(op.lb, reg.t1, self.off_reg(0, reg.a0))
    self.emit_instruction(op.beqz, reg.t1, 'length_end')  # stop at the NUL terminator
    self.emit_instruction(op.addi, reg.a0, reg.a0, 1)
    self.emit_instruction(op.addi, reg.a1, reg.a1, 1)
    self.emit_instruction(op.j, 'length_main_loop')
    self.emit_label('length_end')
    self.emit_instruction(op.move, reg.a0, reg.a1)  # return the count in $a0
    self.emit_instruction(op.jr, reg.ra)
def init_obj_funcs(self):
    """Emit the Object built-ins (abort, type_name, copy)."""
    for emitter in (self.init_obj_abort, self.init_obj_typename, self.init_obj_copy):
        emitter()
def init_obj_abort(self):
    """Emit Object_abort: terminate the program immediately."""
    self.emit_label('Object_abort')
    self.emit_instruction(op.li, reg.v0, 10)  # syscall 10 = exit
    self.emit_instruction(op.syscall)
def init_obj_typename(self):
    """Emit Object_type_name: build a boxed String holding the class name.

    Indexes a class-name table through the object's class tag; the table
    base is read from $s1 -- presumably set up by the program prologue,
    TODO confirm against the runtime entry code.
    """
    self.emit_label('Object_type_name')
    self.emit_instruction(op.move, reg.fp, reg.sp)
    self.visit(ast.CILAllocate(None, 'String'))  # new String box
    self.emit_instruction(op.move, reg.v1, reg.v0)  # $v1 = String object
    self.visit(ast.CILAllocate(None, 'Int'))  # new Int box for the length
    self.emit_instruction(op.lw, reg.a1, self.off_reg(3, reg.fp))  # self argument
    self.emit_instruction(op.lw, reg.a1, self.off_reg(0, reg.a1))  # class tag (word 0)
    self.emit_instruction(op.mul, reg.a1, reg.a1, 4)  # tag -> table byte offset
    self.emit_instruction(op.addu, reg.a1, reg.a1, reg.s1)  # add name-table base ($s1)
    self.emit_instruction(op.lw, reg.a1, self.off_reg(0, reg.a1))  # $a1 = name chars
    self.emit_instruction(op.move, reg.a2, '$0')  # $a2 = running length
    self.emit_instruction(op.move, reg.t2, reg.a1)
    self.emit_label('_str_len_clsname')
    self.emit_instruction(op.lb, reg.a0, self.off_reg(0, reg.t2))
    self.emit_instruction(op.beq, reg.a0, '$0', '_end_clsname_len')  # stop at NUL
    self.emit_instruction(op.addu, reg.a2, reg.a2, 1)
    self.emit_instruction(op.addu, reg.t2, reg.t2, 1)
    self.emit_instruction(op.j, '_str_len_clsname')
    self.emit_label('_end_clsname_len')
    self.emit_instruction(op.sw, reg.a2, self.off_reg(3, reg.v0))  # length into Int box
    self.emit_instruction(op.sw, reg.v0, self.off_reg(3, reg.v1))  # Int box into String
    self.emit_instruction(op.sw, reg.a1, self.off_reg(4, reg.v1))  # chars into String
    self.emit_instruction(op.move, reg.v0, reg.v1)  # return the String object
    self.emit_instruction(op.jr, reg.ra)
def init_obj_copy(self):
    """Emit Object_copy: allocate a block of the object's size and copy it word by word.

    Frame: 12($fp) holds the object to copy (its byte size is word 1 of the
    header); the clone's address is returned in $v0.
    """
    self.emit_label('Object_copy')
    self.emit_instruction(op.move, reg.fp, reg.sp)
    self.emit_instruction(op.lw, reg.t0, self.off_reg(0, reg.fp, 12))  # $t0 = source object
    self.emit_instruction(op.lw, reg.a0, self.off_reg(1, reg.t0))      # $a0 = object size (bytes)
    self.emit_instruction(op.move, reg.t4, reg.a0)                     # $t4 = size, the loop bound
    self.emit_instruction(op.li, reg.v0, 9)                            # syscall 9 = sbrk($a0)
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.move, reg.t2, reg.v0)                     # keep the clone's base address
    # Fix: initialise the byte counter $t3.  The original emitted `li $t2 0`
    # here, clobbering the just-saved clone address and leaving $t3 (used in
    # the loop below) uninitialised.
    self.emit_instruction(op.li, reg.t3, 0)
    self.emit_label('Object_copy_loop')
    self.emit_instruction(op.lw, reg.t1, self.off_reg(0, reg.t0))
    self.emit_instruction(op.sw, reg.t1, self.off_reg(0, reg.v0))
    self.emit_instruction(op.addi, reg.t0, reg.t0, 4)
    self.emit_instruction(op.addi, reg.v0, reg.v0, 4)
    self.emit_instruction(op.addi, reg.t3, reg.t3, 4)
    # Fix: continue while copied bytes < size.  The original branched on
    # `ble $t4 $t3` (size <= counter), the reversed condition.
    self.emit_instruction(op.blt, reg.t3, reg.t4, 'Object_copy_loop')
    self.emit_label('Object_copy_end')
    self.emit_instruction(op.move, reg.v0, reg.t2)                     # return the clone
    self.emit_instruction(op.jr, reg.ra)
def init_io_funcs(self):
    """Emit the IO built-ins (in_int, in_string, out_int, out_string)."""
    emitters = (
        self.init_io_in_int,
        self.init_io_in_str,
        self.init_io_out_int,
        self.init_io_out_str,
    )
    for emitter in emitters:
        emitter()
def init_io_in_int(self):
    """Emit IO_in_int: read an integer from stdin and return it in $a0."""
    self.emit_label('IO_in_int')
    self.emit_instruction(op.li, reg.v0, 5)  # syscall 5 = read_int -> $v0
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.move, reg.a0, reg.v0)  # return-value convention: $a0
    self.emit_instruction(op.jr, reg.ra)
def init_io_in_str(self):
    """Emit IO_in_string: read up to 1024 bytes from stdin.

    NOTE(review): syscall 8 reads into the buffer whose address is in $a0,
    which is not loaded here -- presumably the caller sets it up; confirm.
    """
    self.emit_label('IO_in_string')
    self.emit_instruction(op.li, reg.a1, 1024)  # maximum length for syscall 8
    self.emit_instruction(op.li, reg.v0, 8)  # syscall 8 = read_string
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.jr, reg.ra)
def init_io_out_int(self):
    """Emit IO_out_int: print the integer stored one word above $sp."""
    self.emit_label('IO_out_int')
    self.emit_instruction(op.lw, reg.a0, self.off_reg(1, reg.sp))  # the int argument
    self.emit_instruction(op.li, reg.v0, 1)  # syscall 1 = print_int
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.jr, reg.ra)
def init_io_out_str(self):
    """Emit IO_out_string: unbox the String argument at 16($fp) and print it.

    NOTE(review): the char pointer is read from offset 4 of the String
    object here, while the String builtins use offset 16 -- confirm which
    layout this routine's callers use.
    """
    self.emit_label('IO_out_string')
    self.emit_instruction(op.lw, reg.a0, self.off_reg(1, reg.fp, 12))  # boxed String argument
    self.emit_instruction(op.lw, reg.a0, self.off_reg(1, reg.a0))  # unbox the char pointer
    self.emit_instruction(op.li, reg.v0, 4)  # syscall 4 = print_string
    self.emit_instruction(op.syscall)
    self.emit_instruction(op.jr, reg.ra)
def init_def_obj_func(self):
    """Emit every built-in runtime routine (Object, IO and String families)."""
    for family in (self.init_obj_funcs, self.init_io_funcs, self.init_str_funcs):
        family()
def init_utils(self):
    """Open the assembler segments: '.data' and '.text' directives."""
    self.emit_data('.data')
    self.emit_code('.text')
def allocate_memory(self, size=None, register=False):
    """Emit an sbrk allocation (syscall 9).

    `size` is either a literal byte count or, when `register` is True, the
    name of a register already holding the count.  With no size given, $a0
    is assumed to be loaded already.
    """
    if register:
        self.emit_instruction(op.move, reg.a0, size)
    elif size:
        self.emit_instruction(op.li, reg.a0, size)
    self.emit_instruction(op.li, reg.v0, 9)
    self.emit_instruction(op.syscall)
def pusha(self, excep=None):
    """Push every register in the `reg` namespace, skipping names in `excep`.

    `excep` is an optional collection of register attribute names (e.g.
    'a0').  Registers are pushed in the namespace's definition order so
    popa() can restore them in reverse.
    Fixes: mutable default argument replaced with None, and the dunder
    `__contains__` call replaced with the `in` operator.
    """
    skip = excep or []
    for name, register in reg.__dict__.items():
        # Private attributes of the namespace are not real registers.
        if not name.startswith('_') and name not in skip:
            self.push(register)
def popa(self, excep=None):
    """Pop registers in the reverse of pusha()'s order, skipping `excep` names.

    Fixes: mutable default argument replaced with None, and the dunder
    `__contains__` call replaced with the `in` operator.
    """
    skip = excep or []
    namespace = reg.__dict__
    for name in reversed(list(namespace.keys())):
        if not name.startswith('_') and name not in skip:
            self.pop(namespace[name])
def emit_data_rec(self, ttype, data, label=None):
    """Emit a .data record: '[label: ]<ttype> v1, v2, ...'."""
    joined = ', '.join(str(item) for item in data)
    record = f'{ttype} {joined}'
    self.emit_data(f'{label}: {record}' if label else record)
def emit_code(self, msg):
    """Append one line to the .text segment buffer."""
    self.dotcode.append(msg)
def emit_data(self, msg):
    """Append one line to the .data segment buffer."""
    self.dotdata.append(msg)
def off_reg(self, off, register, delta=0):
    """Render a MIPS address operand: '(off*4 + delta)(register)'.

    `off` is a word offset; `delta` an extra byte displacement.
    """
    return f'{int(off) * 4 + delta}({register})'
def push(self, register, off=0):
    """Store `register` at off*4($sp), then grow the stack by one word."""
    self.emit_instruction(op.sw, register, self.off_reg(off, reg.sp))
    self.emit_instruction(op.addi, reg.sp, reg.sp, -4)
def pop(self, register):
    """Pop the top stack word into `register`, then shrink the stack.

    Fix: the word pushed last lives at 4($sp) (push() stores at 0($sp) and
    then decrements $sp by 4), but the original passed a raw byte offset of
    4 to off_reg, which multiplies by the word size and emitted
    `lw reg 16($sp)`.  off_reg(1, ...) yields the intended 4($sp), matching
    the commented-out reference code this helper replaced and the other
    4($sp) call sites (e.g. String_length, IO_out_int).
    """
    self.emit_instruction(op.lw, register, self.off_reg(1, reg.sp))
    self.stack_allign()
def stack_allign(self):
    """Shrink the stack by one word.  (Misspelled name kept: callers use it.)"""
    self.emit_instruction(op.addi, reg.sp, reg.sp, 4)
def emit_label(self, label):
    """Emit 'label:' into the .text segment."""
    self.emit_code(label + ':')
def eval_infix_func(self, node):
    """Evaluate both operands of a binary node.

    Visits the left child and saves its result (assumed in $a0) on the
    stack, visits the right child, then pops the left result into $t1.
    The right operand's result remains in $a0 -- see infix_func().
    """
    self.visit(node.left)
    self.push(reg.a0)
    self.visit(node.right)
    self.pop(reg.t1)
def infix_func(self, node, op, dest=reg.a0, source1=reg.t1, source2=reg.a0):
    """Evaluate `node`'s operands, then emit the three-address instruction `op`."""
    self.eval_infix_func(node)
    operands = (op, dest, source1, source2)
    self.emit_code(' '.join(str(part) for part in operands))
def __emit_i__(self, inst, args):
    """Format `inst` with its non-None operands ('inst a1, a2') and emit it.

    Fix: operand filtering now uses `is not None` instead of the `!= None`
    comparison (identity check is the correct idiom and avoids surprises
    with objects overriding __eq__).  The trailing space emitted for
    zero-operand instructions (e.g. bare `syscall`) is preserved on purpose
    to keep output byte-identical.
    """
    result = inst
    if args:
        result += ' '
        result += ', '.join(str(a) for a in args if a is not None)
    self.emit_code(result)
def emit_instruction(self, inst: op, arg1: reg = None, arg2: reg = None, arg3: reg = None):
    """Emit `inst` with up to three operands; None operands are skipped."""
    self.__emit_i__(inst, [arg1, arg2, arg3])
# ======================================================================
# =[ VISIT ]============================================================
# ======================================================================
@visitor.on('node')
def visit(self, node):
    """Generic dispatch entry; concrete cases are registered via @visitor.when."""
    pass
@visitor.when(ast.CILProgram)
def visit(self, node: ast.CILProgram):
    """Translate a whole CIL program: data entries, type tables, then code."""
    self.emit_code("\n# Program")
    self.init_utils()
    # .data: user string literals first...
    for data_node in node.dotdata:
        self.visit(data_node)
    # ...then the runtime error messages.
    self.emit_data('_index_negative_msg: .asciiz \"Index to substr is negative\\n\"')
    self.emit_data('_index_out_msg: .asciiz \"Index out range exception\\n\"')
    self.emit_data('_abort_msg: .asciiz \"Execution aborted\\n\"')
    self.emit_data('_div_zero_msg: .asciiz \"Division by zero exception\\n\"')
    # Type tags + virtual tables.
    for type_node in node.dottypes:
        self.visit(type_node)
    # User functions; these labels are emitted natively by init_def_obj_func.
    builtin_labels = (
        'IO_in_int',
        'IO_in_string',
        'IO_out_int',
        'IO_out_string',
        'String_length',
        'String_concat',
        'String_substr',
    )
    for code_node in node.dotcode:
        if code_node.fname not in builtin_labels:
            self.visit(code_node)
    self.init_def_obj_func()
@visitor.when(ast.CILType)
def visit(self, node: ast.CILType):
    """Emit a type's class tag and its virtual table into .data.

    Object layout:
        - Class Tag
        - Object Size
        - Parent ptr
        - Function 1
        - Function 2
        ...
    """
    # Class tag entry.
    self.emit_data(f'{node.name}: .word {self.context.tags[node.name]}')
    # Virtual table: one labelled word per method.
    for method in node.methods:
        # Drop the declaring-type prefix from the mangled method name.
        short_name = '_'.join(method.mname.split('_')[1:])
        self.emit_data(f'{node.name}_{short_name}_method: .word {self.context.mmap[method.mname]}')
@visitor.when(ast.CILData)
def visit(self, node: ast.CILData):
    """Emit one labelled .asciiz entry for a CIL data node.

    Fix: removed the stray debug `print(node.value)` left over from
    development, which dumped every literal to the compiler's stdout.
    """
    if node.value == '':
        value = '""'
    elif isinstance(node.value, str):
        # Non-empty strings are emitted as-is -- presumably they arrive
        # already quoted from the CIL generator; TODO confirm, since
        # unquoted text would be invalid .asciiz data.
        value = node.value
    else:
        value = f'"{node.value}"'
    self.emit_data(f'{node.name}: .asciiz {value}')
@visitor.when(ast.CILFunction)
def visit(self, node: ast.CILFunction):
    """Emit prologue, body and epilogue for a user-defined CIL function."""
    # Built-ins are emitted natively elsewhere; skip any CIL version.
    builtins = frozenset((
        'Object_abort',
        'Object_type_name',
        'Object_copy',
        'String_concat',
        'String_substr',
        'String_length',
        'IO_in_int',
        'IO_out_int',
        'IO_in_string',
        'IO_out_string',
    ))
    if node.fname in builtins:
        return
    self.emit_code("\n# Function start")
    self.emit_label(node.fname)
    # Prologue: new frame, save the return address.
    self.emit_instruction(op.move, reg.fp, reg.sp)
    self.push(reg.ra)
    for instruction in node.instructions:
        self.visit(instruction)
    # Epilogue: restore $ra, drop params plus the two saved words,
    # restore $fp and return.
    self.emit_instruction(op.lw, reg.ra, self.off_reg(1, reg.sp))
    frame_bytes = 4 * node.param_count + 8
    self.emit_instruction(op.addi, reg.sp, reg.sp, frame_bytes)
    self.emit_instruction(op.lw, reg.fp, self.off_reg(0, reg.sp))
    self.emit_instruction(op.jr, reg.ra)
    self.emit_code("\n# Function end")
@visitor.when(ast.CILMethod)
def visit(self, node: ast.CILMethod):
self.emit_code("\n# Method")
# print('AAAAAAAAA')
name = | |
else:
perso = False
user = await self.bot.safe_fetch('user', id) or id
if user:
name = user
else:
name = m['created_by_name']
autoplname = m['name']
nbvote = len(m.get('upvote', []))
if perso:
embed.description += f'`{i}.` ' + get_str(ctx, 'plfind-result-vote{}'.format(
's' if nbvote != 1 else '')).format(autoplname, '', nbvote) + '\n'
else:
embed.description += f'`{i}.` ' + get_str(ctx, 'plfind-result-vote{}'.format('s' if nbvote != 1 else '')).format(
f'**{autoplname}**', get_str(ctx, "plfind-result-middle").format(name), nbvote) + '\n'
await ctx.send(embed=embed)
@commands.command(aliases=['plvote', 'votepl', 'upvotepl'])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def plupvote(self, ctx, *, name=None):
    """
    {command_prefix}plupvote [autoplaylist]
    {help}
    """
    # Toggles the caller's upvote on an autoplaylist (the currently loaded
    # one when no name is given) and persists it in the global settings.
    settings = await SettingsDB.get_instance().get_glob_settings()
    if not name:
        # No argument: operate on the playlist loaded in this guild's player.
        player = await self.get_player(ctx.guild)
        if not player.is_connected:
            return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
        if not player.autoplaylist:
            return await ctx.send(get_str(ctx, "music-pl-disabled").format("`{}plstart`".format(get_server_prefixes(ctx.bot, ctx.guild))))
        name = player.autoplaylist['name'].lower()
        try:
            autopl = settings.autoplaylists[name]
        except KeyError:  # can happen after plclear
            return await ctx.send(get_str(ctx, "music-plsettings-got-deleted"))
    else:
        name = str(name.lower())
        # A mention selects that user's personal autoplaylist.
        if ctx.message.mentions:
            user = ctx.message.mentions[-1]
            if user.mention not in name:  # It was a prefix
                user = None
                if len(ctx.message.mentions) > 1:
                    user = ctx.message.mentions[0]
            if user:
                name = str(user.id)
        try:
            autopl = settings.autoplaylists[name]
        except KeyError:
            return await ctx.send(get_str(ctx, "music-plinfo-not-found"))
    # is_perso returns a truthy user-like object for personal playlists --
    # presumably falsy otherwise; TODO confirm against its definition.
    perso = await self.is_perso(ctx.guild, name=name)
    if perso:
        username = perso.name
        title = get_str(ctx, "music-plsettings-autopl").format(username)
    else:
        title = f"Autoplaylist : {autopl['name']}"
    embed = discord.Embed(title=title, colour=self.get_color(ctx.guild))
    # Toggle membership of the caller in the playlist's upvote list.
    vote_list = autopl.get('upvote', [])
    if ctx.author.id in vote_list:
        vote_list.remove(ctx.author.id)
        embed.description = get_str(ctx, "upvote-removed")
    else:
        vote_list.append(ctx.author.id)
        embed.description = get_str(ctx, "upvote-added")
    autopl['upvote'] = vote_list
    await SettingsDB.get_instance().set_glob_settings(settings)
    await ctx.send(embed=embed)
@pl.command(name='upvote', aliases=['vote'])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def pl_upvote(self, ctx, *, file_name=None):
    """
    {command_prefix}pl upvote [autoplaylist]
    {help}
    """
    # Subcommand alias: delegate to the top-level plupvote command.
    await ctx.invoke(self.plupvote, name=file_name)
@pl.command(name='add', aliases=['+', 'a'])
async def pl_add(self, ctx, *, song_url=None):
    """
    {command_prefix}pl add [url]
    {command_prefix}pl add [key_words]
    {command_prefix}pl add current_queue
    {command_prefix}pl add
    {help}
    """
    # Subcommand alias: delegate to the top-level pladd command.
    await ctx.invoke(self.pladd, song_url=song_url)
@commands.command(aliases=['pl+', 'pla'])
async def pladd(self, ctx, *, song_url=None):
    """
    {command_prefix}pladd [url]
    {command_prefix}pladd [key_words]
    {command_prefix}pladd current_queue
    {command_prefix}pladd
    {help}
    """
    # Adds a track, a whole playlist, or the current queue to the guild's
    # active autoplaylist and persists it in the global settings.
    player = await self.get_player(ctx.guild)
    if not ctx.guild.me.voice:
        return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
    my_vc = ctx.guild.me.voice.channel
    # Non-DJ users must share the voice channel and not be deafened.
    if not await self.is_dj(ctx):
        if ctx.author not in my_vc.members or (ctx.author.voice.self_deaf or ctx.author.voice.deaf):
            return await ctx.send(get_str(ctx, "music-not-my-channel").format(f"**{my_vc}**"), delete_after=30)
    if not player.autoplaylist:
        return await ctx.send(get_str(ctx, "music-pl-disabled").format("`{}plstart`".format(get_server_prefixes(ctx.bot, ctx.guild))))
    settings = await SettingsDB.get_instance().get_glob_settings()
    try:
        settings.autoplaylists[player.autoplaylist['name'].lower()]
    except KeyError:  # can happen after plclear
        return await ctx.send(get_str(ctx, "music-plsettings-got-deleted"))
    file_name = player.autoplaylist['name'].lower()
    # Private playlists may only be edited by the creator, whitelisted
    # users, or the bot owner.
    if settings.autoplaylists[file_name]['private']:
        if int(settings.autoplaylists[file_name]['created_by']) != ctx.author.id and str(ctx.author.id) not in settings.autoplaylists[file_name]['whitelist'] and ctx.author.id != owner_id:
            return await ctx.send(get_str(ctx, "music-autoplaylist-noten-perms"))
    add_current = False
    # IDK, maybe he wants to add c/cu/cur...
    if song_url and 'current_queue'.startswith(song_url.lower()) and len(song_url) > 3:
        if player.queue:
            # Snapshot the live queue as a synthetic playlist payload.
            add_current = True
            results = {'playlistInfo': {},
                       'loadType': 'PLAYLIST_LOADED', 'tracks': []}
            if player.current:
                current = {'info': {}}
                current['info']['title'] = player.current.title
                current['info']['uri'] = player.current.uri
                results['tracks'].append(current)
            for track in player.queue:
                current = {'info': {}}
                current['info']['title'] = track.title
                current['info']['uri'] = track.uri
                results['tracks'].append(current)
            results['playlistInfo'] = {
                'selectedTrack': -1, 'name': 'Current Queue'}
        else:
            # Fix: this branch referenced an undefined `load` message object
            # (`load.edit(...)` raised NameError at runtime); reply through
            # ctx.send instead.
            return await ctx.send(get_str(ctx, "music-plinfo-empty-auto").format("`{}pladd`".format(get_server_prefixes(ctx.bot, ctx.guild))))
    if not song_url:
        # No argument: add the currently playing track.
        if player.current:
            song_url = player.current.uri
            title = player.current.title
        else:
            return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
    elif not match_url(song_url) and not add_current:
        # Free text (or a Spotify URI): resolve to a single track.
        if self.is_spotify(song_url):
            try:
                data = await self.prepare_spotify(ctx, song_url, node=player.node)
            except SpotifyError as e:
                return await ctx.send(e)
        else:
            data = await self.prepare_url(query=song_url, node=player.node)
        if not data or not data['tracks']:
            if data['loadType'] == "LOAD_FAILED":
                await ctx.send(get_str(ctx, "music-load-failed").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
            elif data['loadType'] == "NO_MATCHES":
                await ctx.send(get_str(ctx, "music-no-result").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
            return
        title = data['tracks'][0]['info']['title']
        song_url = data['tracks'][0]['info']['uri']
    else:
        # Direct URL, or the current-queue snapshot built above.
        if self.is_spotify(song_url):
            try:
                results = await self.prepare_spotify(ctx, song_url, node=player.node)
            except SpotifyError as e:
                return await ctx.send(e)
        else:
            if not add_current:
                results = await self.prepare_url(query=song_url, node=player.node)
        if not results or not results['tracks']:
            if results['loadType'] == "LOAD_FAILED":
                await ctx.send(get_str(ctx, "music-load-failed").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
            elif results['loadType'] == "NO_MATCHES":
                await ctx.send(get_str(ctx, "music-no-result").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
            return
        if results['playlistInfo']:  # it's a playlist with multiple tracks
            # Fix: the original mutated `urls` while iterating it
            # (`urls.remove(url)` inside `for url in urls`), which skips the
            # element after every removal and so miscounts `added` and can
            # let duplicates through.  Collect the newly-added URLs in a
            # separate list instead.
            new_urls = []
            for track in results['tracks']:
                url = track['info']['uri']
                if url not in player.autoplaylist['songs']:
                    player.autoplaylist['songs'].append(url)
                    new_urls.append(url)
            settings.autoplaylists[player.autoplaylist['name'].lower(
            )] = player.autoplaylist
            await SettingsDB.get_instance().set_glob_settings(settings)
            added = len(new_urls)
            not_added = len(results['tracks']) - added
            if added and not not_added:
                content = get_str(
                    ctx, "music-pladd-pl-added").format(added)
            elif added and not_added:
                content = get_str(
                    ctx, "music-pladd-pls-added").format(added, not_added)
            else:
                content = get_str(ctx, "music-pladd-all-already")
            if 'failed' in results:
                content += f" ({get_str(ctx, 'music-spotify-songs-failed').format(results['failed']) if results['failed'] > 1 else get_str(ctx, 'music-spotify-song-failed').format(results['failed'])})"
            return await ctx.send(content)
        else:  # it's just a track
            title = results['tracks'][0]['info']['title']
            # Preserve an explicit start-time parameter from the given URL.
            start_time = re.findall('[&?](t|start|s)=(\d+)', song_url)
            song_url = results['tracks'][0]['info']['uri'] + \
                (f'?t={start_time[-1][-1]}' if start_time else '')
    # Reject duplicates, ignoring any ?t= start-time suffix.
    if any(s.split('?t=')[0] == song_url.split('?t=')[0] for s in player.autoplaylist['songs']):
        return await ctx.send(get_str(ctx, "music-pladd-already-present"), delete_after=30)
    else:
        player.autoplaylist['songs'].append(song_url)
        settings.autoplaylists[player.autoplaylist['name'].lower(
        )] = player.autoplaylist
        await SettingsDB.get_instance().set_glob_settings(settings)
        await ctx.send(get_str(ctx, "music-pladd-added").format(f"**{title}**"))
@pl.command(name='remove', aliases=['-', 'r', 'delete'])
async def pl_remove(self, ctx, *, song_url=None):
    """
    {command_prefix}plremove [position_in_autoplaylist]
    {command_prefix}plremove [key_words]
    {command_prefix}plremove [url]
    {command_prefix}plremove
    {help}
    """
    # Subcommand alias: delegate to the top-level plremove command.
    await ctx.invoke(self.plremove, song_url=song_url)
@commands.command(aliases=['pl-', 'plr', 'pldelete'])
async def plremove(self, ctx, *, song_url=None):
"""
{command_prefix}plremove [url]
{command_prefix}plremove [key_words]
{command_prefix}plremove
{help}
"""
player = await self.get_player(ctx.guild)
if not ctx.guild.me.voice:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
my_vc = ctx.guild.me.voice.channel
if not await self.is_dj(ctx):
if ctx.author not in my_vc.members or (ctx.author.voice.self_deaf or ctx.author.voice.deaf):
return await ctx.send(get_str(ctx, "music-not-my-channel").format(f"**{my_vc}**"), delete_after=30)
if not player.autoplaylist:
return await ctx.send(get_str(ctx, "music-pl-disabled").format("`{}plstart`".format(get_server_prefixes(ctx.bot, ctx.guild))))
settings = await SettingsDB.get_instance().get_glob_settings()
try:
settings.autoplaylists[player.autoplaylist['name'].lower()]
except KeyError: # can happen after plclear
return await ctx.send(get_str(ctx, "music-plsettings-got-deleted"))
file_name = player.autoplaylist['name'].lower()
if settings.autoplaylists[file_name]['private']:
if int(settings.autoplaylists[file_name]['created_by']) != ctx.author.id and str(ctx.author.id) not in settings.autoplaylists[file_name]['whitelist'] and ctx.author.id != owner_id:
return await ctx.send(get_str(ctx, "music-autoplaylist-noten-perms"))
if not song_url:
if player.current:
song_url = player.current.uri
title = player.current.title
else:
return await ctx.send(get_str(ctx, "not-playing"), delete_after=20)
elif not match_url(song_url):
if song_url.isdigit():
if int(song_url) <= len(player.autoplaylist['songs']):
return await ctx.invoke(self.plremove, song_url=player.autoplaylist['songs'][int(song_url) - 1])
if self.is_spotify(song_url):
try:
data = await self.prepare_spotify(ctx, song_url, node=player.node)
except SpotifyError as e:
return await ctx.send(e)
else:
data = await self.prepare_url(query=song_url, node=player.node)
if not data or not data['tracks']:
if data['loadType'] == "LOAD_FAILED":
await ctx.send(get_str(ctx, "music-load-failed").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
elif data['loadType'] == "NO_MATCHES":
await ctx.send(get_str(ctx, "music-no-result").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
return
title = data['tracks'][0]['info']['title']
song_url = data['tracks'][0]['info']['uri']
else:
if self.is_spotify(song_url):
try:
results = await self.prepare_spotify(ctx, song_url, node=player.node)
except SpotifyError as e:
return await ctx.send(e)
else:
results = await self.prepare_url(query=song_url, node=player.node)
if not results or not results['tracks']:
if results['loadType'] == "LOAD_FAILED":
await ctx.send(get_str(ctx, "music-load-failed").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
elif results['loadType'] == "NO_MATCHES":
await ctx.send(get_str(ctx, "music-no-result").format("`{}search`".format(get_server_prefixes(ctx.bot, ctx.guild))))
return
if results['playlistInfo']: # it's a playlist with multiple tracks
urls = [track['info']['uri'] for track in results['tracks']]
for url in urls:
if url not in player.autoplaylist['songs']:
urls.remove(url)
else:
player.autoplaylist['songs'].remove(url)
settings.autoplaylists[player.autoplaylist['name'].lower(
)] = player.autoplaylist
await SettingsDB.get_instance().set_glob_settings(settings)
removed = len(urls)
not_removed = len(results['tracks']) - removed
if removed and not not_removed:
content = get_str(
ctx, "music-plremove-pl-removed").format(removed)
elif removed and not_removed:
content = get_str(
ctx, "music-plremove-pls-removed").format(removed, not_removed)
else:
content = get_str(ctx, "music-plremove-all-already")
if 'failed' in results:
content += f" ({get_str(ctx, 'music-spotify-songs-failed').format(results['failed']) if results['failed'] > 1 else get_str(ctx, 'music-spotify-song-failed').format(results['failed'])})"
return await ctx.send(content)
else: # it's just a track
title = results['tracks'][0]['info']['title']
song_url = results['tracks'][0]['info']['uri']
occ = [s for s in player.autoplaylist['songs']
if s.split('?t=')[0] == song_url.split('?t=')[0]]
if occ:
song_url = occ[0]
if song_url not in player.autoplaylist['songs']:
return await ctx.send(get_str(ctx, "music-plremove-not-found"), delete_after=30)
else:
player.autoplaylist['songs'].remove(song_url)
settings.autoplaylists[player.autoplaylist['name'].lower(
)] = player.autoplaylist
await SettingsDB.get_instance().set_glob_settings(settings)
await ctx.send(get_str(ctx, "music-plremove-removed").format(f"**{title}**"))
@commands.command(aliases=['nextplay', 'playtop', 'plnext', 'playafter', 'pnext', 'after', 'topplay', 'playt'])
async def playnext(self, ctx, *, query: str):
"""
{command_prefix}playnext [url]
{command_prefix}playnext [key_words]
{help}
"""
await ctx.trigger_typing()
song = await ctx.invoke(self.playnow, query=query, next=True)
if isinstance(song, lavalink.AudioTrack):
await ctx.send(get_str(ctx, "music-promote-promoted").format(f"**{song.title}**"))
@commands.command(aliases=['pnow', 'instaplay', 'pn', 'playn', 'streamnow', 'singnow'])
async def playnow(self, ctx, *, query: str, next=False):
"""
{command_prefix}playnow [url]
{command_prefix}playnow [key_words]
{help}
"""
if not ctx.me.voice or ctx.guild.id not in self.bot.lavalink.players.players:
try:
player = await ctx.invoke(self.voice_connect)
except NoVoiceChannel:
ctx.command.reset_cooldown(ctx)
return await ctx.send(get_str(ctx, "music-join-no-channel"))
except lavalink.NodeException:
return await ctx.send(get_str(ctx, "music-nodes-unavailable"))
if not player:
return
else:
player = await self.get_player(ctx.guild)
if not ctx.guild.me.voice:
return await ctx.send(get_str(ctx, "not-connected"), delete_after=20)
my_vc = ctx.guild.me.voice.channel
if not await self.is_dj(ctx):
if ctx.author not in my_vc.members or (ctx.author.voice.self_deaf or ctx.author.voice.deaf):
return await ctx.send(get_str(ctx, "music-not-my-channel").format(f"**{my_vc}**"), delete_after=30)
if (not player.queue and not player.is_playing):
# why playnow if queue is empty...
return await ctx.invoke(self.play_song, query=query)
if not await self.is_dj(ctx):
raise commands.errors.CheckFailure
typing = True
channel = ctx.guild.get_channel(player.channel)
if channel:
async for entry in channel.history(limit=5):
if not entry or not player.npmsg: # idk
continue
if entry.id == player.npmsg.id:
typing = False
break
if entry.content and len(entry.content) > 500: # if msg too long
break
elif entry.attachments or entry.embeds: # if there are embeds or attchments
break
# --- file boundary: dgen_os/python/diffusion_functions_elec.py ---
"""
Name: diffusion_functions
Purpose: Contains functions to calculate diffusion of distributed wind model
(1) Determine maximum market size as a function of payback time;
(2) Parameterize Bass diffusion curve with diffusion rates (p, q) set by payback time;
    (3) Determine current stage (equivalent time) of diffusion based on existing market and current economics
(4) Calculate new market share by stepping forward on diffusion curve.
"""
import numpy as np
import pandas as pd
import config
import utility_functions as utilfunc
import decorators
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#=============================================================================
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_diffusion_solar(df, is_first_year, bass_params, year,
                         override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
    """
    Calculate the market share (ms) added in the solve year and update adoption quantities.

    Market share must be less than max market share (mms) except when the initial ms
    is greater than the calculated mms; for that circumstance no diffusion is allowed
    until mms > ms. Also, ms is not allowed to decrease if economics deteriorate.
    Using the calculated market share, relevant quantities (adopters, capacity,
    market value) are updated.

    Parameters
    ----------
    df : pandas.DataFrame
        Input agent dataframe.
    is_first_year : bool
        Passed to :func:`diffusion_functions.calc_diffusion_market_share` to determine
        the increment of `teq`.
    bass_params : pandas.DataFrame
        DataFrame generally derived from :func:`settings.get_bass_params`, includes the
        following attributes: `control_reg_id`, `country_abbr`, `sector_abbr`,
        `state_id`, `p`, `q`, `teq_yr1`, `tech`.
    year : int
        Current solve year. For 2014/2016/2018 state-level capacity totals are
        calibrated against observed historical deployment.
    override_p_value : float, optional
        Value to override bass diffusion `p` coefficient of innovation with.
        NOTE(review): not referenced in this function body — kept for
        interface compatibility; confirm against callers.
    override_q_value : float, optional
        Value to override bass diffusion `q` coefficient of imitation with.
        NOTE(review): not referenced in this function body.
    override_teq_yr1_value : float, optional
        Value to override bass diffusion `teq_yr1` (years since diffusion began
        at first observation) with. NOTE(review): not referenced in this
        function body.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        The updated agent dataframe, and a `market_last_year` dataframe (with
        `*_last_year` column names) used to inform diffusion in the next year.
    """
    df = df.reset_index()
    bass_params = bass_params[bass_params['tech']=='solar']

    # set p/q/teq_yr1 params
    df = pd.merge(df, bass_params[['state_abbr', 'bass_param_p', 'bass_param_q', 'teq_yr1', 'sector_abbr']], how = 'left', on = ['state_abbr','sector_abbr'])

    # calc diffusion market share
    df = calc_diffusion_market_share(df, is_first_year)

    # market share floor is based on last year's market share
    df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])

    # calculate the "new" market share (old - current)
    df['new_market_share'] = df['market_share'] - df['market_share_last_year']

    # cap the new_market_share where the market share exceeds the max market share
    df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])

    # calculate new adopters, capacity and market value
    df['new_adopters'] = df['new_market_share'] * df['developable_agent_weight']
    df['new_market_value'] = df['new_adopters'] * df['system_kw'] * df['system_capex_per_kw']
    df['new_system_kw'] = df['new_adopters'] * df['system_kw']
    df['new_batt_kw'] = df['new_adopters'] * df['batt_kw']
    df['new_batt_kwh'] = df['new_adopters'] * df['batt_kwh']

    # then add these values to values from last year to get cumulative values:
    df['number_of_adopters'] = df['adopters_cum_last_year'] + df['new_adopters']
    df['market_value'] = df['market_value_last_year'] + df['new_market_value']
    df['system_kw_cum'] = df['system_kw_cum_last_year'] + df['new_system_kw']
    df['batt_kw_cum'] = df['batt_kw_cum_last_year'] + df['new_batt_kw']
    df['batt_kwh_cum'] = df['batt_kwh_cum_last_year'] + df['new_batt_kwh']

    # constrain state-level capacity totals to known historical values
    if year in (2014, 2016, 2018):
        group_cols = ['state_abbr', 'sector_abbr', 'year']
        state_capacity_total = (df[group_cols+['system_kw_cum', 'batt_kw_cum', 'batt_kwh_cum', 'agent_id']].groupby(group_cols)
                                .agg({'system_kw_cum':'sum', 'batt_kw_cum':'sum', 'batt_kwh_cum':'sum', 'agent_id':'count'})
                                .rename(columns={'system_kw_cum':'state_solar_kw_cum', 'batt_kw_cum':'state_batt_kw_cum', 'batt_kwh_cum':'state_batt_kwh_cum', 'agent_id':'agent_count'})
                                .reset_index())

        # coerce dtypes
        state_capacity_total.state_solar_kw_cum = state_capacity_total.state_solar_kw_cum.astype(np.float64)
        state_capacity_total.state_batt_kw_cum = state_capacity_total.state_batt_kw_cum.astype(np.float64)
        state_capacity_total.state_batt_kwh_cum = state_capacity_total.state_batt_kwh_cum.astype(np.float64)
        df.system_kw_cum = df.system_kw_cum.astype(np.float64)
        df.batt_kw_cum = df.batt_kw_cum.astype(np.float64)
        df.batt_kwh_cum = df.batt_kwh_cum.astype(np.float64)

        # merge state totals back to agent df
        df = pd.merge(df, state_capacity_total, how = 'left', on = ['state_abbr', 'sector_abbr', 'year'])

        # read csv of historical capacity values by state and sector
        historical_state_df = pd.read_csv(config.OBSERVED_DEPLOYMENT_BY_STATE)

        # join historical data to agent df
        df = pd.merge(df, historical_state_df, how='left', on=['state_abbr', 'sector_abbr', 'year'])

        # calculate scale factor - weight that is given to each agent based on proportion of state total
        # where state cumulative capacity is 0, proportion evenly to all agents
        df['solar_scale_factor'] = np.where(df['state_solar_kw_cum'] == 0, 1.0/df['agent_count'], df['system_kw_cum'] / df['state_solar_kw_cum'])
        df['batt_mw_scale_factor'] = np.where(df['state_batt_kw_cum'] == 0, 1.0/df['agent_count'], df['batt_kw_cum'] / df['state_batt_kw_cum'])
        df['batt_mwh_scale_factor'] = np.where(df['state_batt_kwh_cum'] == 0, 1.0/df['agent_count'], df['batt_kwh_cum'] / df['state_batt_kwh_cum'])

        # use scale factor to constrain agent capacity values to historical values
        df['system_kw_cum'] = df['solar_scale_factor'] * df['observed_solar_mw'] * 1000.
        df['batt_kw_cum'] = df['batt_mw_scale_factor'] * df['observed_storage_mw'] * 1000.
        df['batt_kwh_cum'] = df['batt_mwh_scale_factor'] * df['observed_storage_mwh'] * 1000.

        # recalculate number of adopters using anecdotal values
        # (assumed avg system sizes: 5 kW res, 100 kW non-res — TODO confirm)
        df['number_of_adopters'] = np.where(df['sector_abbr'] == 'res', df['system_kw_cum']/5.0, df['system_kw_cum']/100.0)

        # recalculate market share; avoid division by zero for agents with no
        # developable weight
        df['market_share'] = np.where(df['developable_agent_weight'] == 0, 0.0,
                                      df['number_of_adopters'] / df['developable_agent_weight'])
        df['market_share'] = df['market_share'].astype(np.float64)

        # drop the intermediate calibration columns
        df.drop(['agent_count',
                 'state_solar_kw_cum','state_batt_kw_cum','state_batt_kwh_cum',
                 'observed_solar_mw','observed_storage_mw','observed_storage_mwh',
                 'solar_scale_factor','batt_mw_scale_factor','batt_mwh_scale_factor'], axis=1, inplace=True)

    # BUG FIX: take an explicit copy of the column slice before renaming.
    # The original renamed (inplace) a slice of `df`, which triggers pandas'
    # SettingWithCopyWarning and relies on copy-vs-view behavior.
    market_last_year = df[['agent_id',
                           'market_share','max_market_share','number_of_adopters',
                           'market_value','initial_number_of_adopters','initial_pv_kw','initial_batt_kw','initial_batt_kwh',
                           'initial_market_share','initial_market_value',
                           'system_kw_cum','new_system_kw',
                           'batt_kw_cum','new_batt_kw',
                           'batt_kwh_cum','new_batt_kwh']].copy()

    market_last_year.rename(columns={'market_share':'market_share_last_year',
                                     'max_market_share':'max_market_share_last_year',
                                     'number_of_adopters':'adopters_cum_last_year',
                                     'market_value': 'market_value_last_year',
                                     'system_kw_cum':'system_kw_cum_last_year',
                                     'batt_kw_cum':'batt_kw_cum_last_year',
                                     'batt_kwh_cum':'batt_kwh_cum_last_year'}, inplace=True)

    return df, market_last_year
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 3, prefix = '')
def calc_diffusion(df, cur, con, techs, choose_tech, sectors, schema, is_first_year,
                   bass_params, override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
    """
    Calculate the fraction of the overall population that has adopted the
    technology in the current period, and update adoption quantities.

    Parameters
    ----------
    df : pandas.DataFrame
        Agent dataframe. Columns read here include `selected_option`,
        `market_share_last_year`, `max_market_share`, `system_size_kw`,
        `developable_agent_weight`, `installed_costs_dollars_per_kw`, and the
        `*_last_year` / `initial_*` bookkeeping columns.
    cur, con, techs, sectors, schema
        NOTE(review): accepted but not referenced in this function body —
        presumably kept for interface compatibility with callers; confirm.
    choose_tech : bool
        If True, cap each agent's market share so the total across competing
        technologies does not exceed 1.
    is_first_year : bool
        Passed through to :func:`calc_diffusion_market_share`.
    bass_params : pandas.DataFrame
        Bass diffusion parameters, passed to :func:`set_bass_param` together
        with the optional override values.
    override_p_value, override_q_value, override_teq_yr1_value : float, optional
        Optional overrides forwarded to :func:`set_bass_param`.

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        The updated agent dataframe and a `market_last_year` dataframe (with
        `*_last_year` column names) used to inform diffusion in the next
        solve year.

    Note
    ----
    1) This does not specify the actual new adoption fraction without knowing adoption in the previous period.
    2) The relative economic attractiveness controls the p, q value in the Bass diffusion model.
    3) The current assumption is that only payback and MBS are being used, that pp is bounded [0-30] and MBS is bounded [0-120].
    """
    logger.info("\t\tCalculating Diffusion")
    # set p/q/teq_yr1 params (overrides applied inside set_bass_param)
    df = set_bass_param(df, bass_params, override_p_value, override_q_value, override_teq_yr1_value)
    # calc diffusion market share
    df = calc_diffusion_market_share(df, is_first_year)
    # ensure no diffusion for non-selected options (selected_option acts as a 0/1 mask)
    df['diffusion_market_share'] = df['diffusion_market_share'] * df['selected_option']
    # market share floor is based on last year's market share
    df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
    # if in tech choice mode, ensure that total market share doesn't exceed 1
    if choose_tech == True:
        # extract out the rows for unselected technologies
        market_share_cap = df[df['selected_option'] == False][['county_id', 'bin_id', 'sector_abbr', 'market_share']].groupby(['county_id', 'bin_id', 'sector_abbr']).sum().reset_index()
        # determine how much market share is allowable based on 1 - the MS of the unselected techs
        market_share_cap['market_share_cap'] = 1 - market_share_cap['market_share']
        # drop the market share column
        market_share_cap.drop('market_share', inplace = True, axis = 1)
        # merge to df
        df = pd.merge(df, market_share_cap, how = 'left', on = ['county_id', 'bin_id', 'sector_abbr'])
        # cap the market share (for the selected option only)
        df['market_share'] = np.where(df['selected_option'] == True, np.minimum(df['market_share'], df['market_share_cap']), df['market_share'])
        # drop the market share cap field
        df.drop('market_share_cap', inplace = True, axis = 1)
    # calculate the "new" market share (old - current)
    df['new_market_share'] = df['market_share'] - df['market_share_last_year']
    # cap the new_market_share where the market share exceeds the max market share
    df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])
    # calculate new adopters, capacity and market value
    # (agents with zero system size add no adopters)
    df['new_adopters'] = np.where(df['system_size_kw'] == 0, 0, df['new_market_share'] * df['developable_agent_weight'])
    df['new_capacity'] = df['new_adopters'] * df['system_size_kw']
    df['new_market_value'] = df['new_adopters'] * df['system_size_kw'] * df['installed_costs_dollars_per_kw']
    # then add these values to values from last year to get cumulative values:
    df['number_of_adopters'] = df['adopters_cum_last_year'] + df['new_adopters']
    df['installed_capacity'] = df['installed_capacity_last_year'] + df['new_capacity'] # All capacity in kW in the model
    df['market_value'] = df['market_value_last_year'] + df['new_market_value']
    market_last_year = df[['county_id','bin_id', 'sector_abbr', 'tech', 'market_share', 'max_market_share','number_of_adopters', 'installed_capacity', 'market_value', 'initial_number_of_adopters', 'initial_capacity_mw', 'initial_market_share', 'initial_market_value']] # Update dataframe for next solve year
    market_last_year.columns = ['county_id', 'bin_id', 'sector_abbr', 'tech', 'market_share_last_year', 'max_market_share_last_year','adopters_cum_last_year', 'installed_capacity_last_year', 'market_value_last_year', 'initial_number_of_adopters', 'initial_capacity_mw', 'initial_market_share', 'initial_market_value']
    return df, market_last_year
#=============================================================================
# ^^^^ Calculate new diffusion in market segment ^^^^
def calc_diffusion_market_share(df, is_first_year):
"""
Calculate the fraction of overall population that have adopted (diffused into
the max market share) the technology in the current period. Note that this does
not specify the actual new adoption fraction without knowing adoption in the previous period.
Parameters
----------
df : pandas.DataFrame
Attributes
----------
df.p : numpy.ndarray
Bass diffusion parameter defining the coeffieicent of innovation.
df.q | |
3565, 3566, 3572, 3571)
model.createElement(560, 3596, 3597, 3603, 3602, 3566, 3567, 3573, 3572)
model.createElement(561, 3597, 3598, 3604, 3603, 3567, 3568, 3574, 3573)
model.createElement(562, 3598, 1161, 1162, 3604, 3568, 1156, 1157, 3574)
model.createElement(563, 905, 3599, 3605, 904, 900, 3569, 3575, 899)
model.createElement(564, 3599, 3600, 3606, 3605, 3569, 3570, 3576, 3575)
model.createElement(565, 3600, 3601, 3607, 3606, 3570, 3571, 3577, 3576)
model.createElement(566, 3601, 3602, 3608, 3607, 3571, 3572, 3578, 3577)
model.createElement(567, 3602, 3603, 3609, 3608, 3572, 3573, 3579, 3578)
model.createElement(568, 3603, 3604, 3610, 3609, 3573, 3574, 3580, 3579)
model.createElement(569, 3604, 1162, 1163, 3610, 3574, 1157, 1158, 3580)
model.createElement(570, 904, 3605, 1595, 73, 899, 3575, 1589, 74)
model.createElement(571, 3605, 3606, 1596, 1595, 3575, 3576, 1590, 1589)
model.createElement(572, 3606, 3607, 1597, 1596, 3576, 3577, 1591, 1590)
model.createElement(573, 3607, 3608, 1598, 1597, 3577, 3578, 1592, 1591)
model.createElement(574, 3608, 3609, 1599, 1598, 3578, 3579, 1593, 1592)
model.createElement(575, 3609, 3610, 1600, 1599, 3579, 3580, 1594, 1593)
model.createElement(576, 3610, 1163, 263, 1600, 3580, 1158, 264, 1594)
model.createElement(577, 150, 1367, 3611, 913, 151, 1361, 3581, 908)
model.createElement(578, 1367, 1368, 3612, 3611, 1361, 1362, 3582, 3581)
model.createElement(579, 1368, 1369, 3613, 3612, 1362, 1363, 3583, 3582)
model.createElement(580, 1369, 1370, 3614, 3613, 1363, 1364, 3584, 3583)
model.createElement(581, 1370, 1371, 3615, 3614, 1364, 1365, 3585, 3584)
model.createElement(582, 1371, 1372, 3616, 3615, 1365, 1366, 3586, 3585)
model.createElement(583, 1372, 279, 1164, 3616, 1366, 278, 1159, 3586)
model.createElement(584, 913, 3611, 3617, 912, 908, 3581, 3587, 907)
model.createElement(585, 3611, 3612, 3618, 3617, 3581, 3582, 3588, 3587)
model.createElement(586, 3612, 3613, 3619, 3618, 3582, 3583, 3589, 3588)
model.createElement(587, 3613, 3614, 3620, 3619, 3583, 3584, 3590, 3589)
model.createElement(588, 3614, 3615, 3621, 3620, 3584, 3585, 3591, 3590)
model.createElement(589, 3615, 3616, 3622, 3621, 3585, 3586, 3592, 3591)
model.createElement(590, 3616, 1164, 1165, 3622, 3586, 1159, 1160, 3592)
model.createElement(591, 912, 3617, 3623, 911, 907, 3587, 3593, 906)
model.createElement(592, 3617, 3618, 3624, 3623, 3587, 3588, 3594, 3593)
model.createElement(593, 3618, 3619, 3625, 3624, 3588, 3589, 3595, 3594)
model.createElement(594, 3619, 3620, 3626, 3625, 3589, 3590, 3596, 3595)
model.createElement(595, 3620, 3621, 3627, 3626, 3590, 3591, 3597, 3596)
model.createElement(596, 3621, 3622, 3628, 3627, 3591, 3592, 3598, 3597)
model.createElement(597, 3622, 1165, 1166, 3628, 3592, 1160, 1161, 3598)
model.createElement(598, 911, 3623, 3629, 910, 906, 3593, 3599, 905)
model.createElement(599, 3623, 3624, 3630, 3629, 3593, 3594, 3600, 3599)
model.createElement(600, 3624, 3625, 3631, 3630, 3594, 3595, 3601, 3600)
model.createElement(601, 3625, 3626, 3632, 3631, 3595, 3596, 3602, 3601)
model.createElement(602, 3626, 3627, 3633, 3632, 3596, 3597, 3603, 3602)
model.createElement(603, 3627, 3628, 3634, 3633, 3597, 3598, 3604, 3603)
model.createElement(604, 3628, 1166, 1167, 3634, 3598, 1161, 1162, 3604)
model.createElement(605, 910, 3629, 3635, 909, 905, 3599, 3605, 904)
model.createElement(606, 3629, 3630, 3636, 3635, 3599, 3600, 3606, 3605)
model.createElement(607, 3630, 3631, 3637, 3636, 3600, 3601, 3607, 3606)
model.createElement(608, 3631, 3632, 3638, 3637, 3601, 3602, 3608, 3607)
model.createElement(609, 3632, 3633, 3639, 3638, 3602, 3603, 3609, 3608)
model.createElement(610, 3633, 3634, 3640, 3639, 3603, 3604, 3610, 3609)
model.createElement(611, 3634, 1167, 1168, 3640, 3604, 1162, 1163, 3610)
model.createElement(612, 909, 3635, 1601, 72, 904, 3605, 1595, 73)
model.createElement(613, 3635, 3636, 1602, 1601, 3605, 3606, 1596, 1595)
model.createElement(614, 3636, 3637, 1603, 1602, 3606, 3607, 1597, 1596)
model.createElement(615, 3637, 3638, 1604, 1603, 3607, 3608, 1598, 1597)
model.createElement(616, 3638, 3639, 1605, 1604, 3608, 3609, 1599, 1598)
model.createElement(617, 3639, 3640, 1606, 1605, 3609, 3610, 1600, 1599)
model.createElement(618, 3640, 1168, 262, 1606, 3610, 1163, 263, 1600)
model.createElement(619, 149, 1373, 3641, 918, 150, 1367, 3611, 913)
model.createElement(620, 1373, 1374, 3642, 3641, 1367, 1368, 3612, 3611)
model.createElement(621, 1374, 1375, 3643, 3642, 1368, 1369, 3613, 3612)
model.createElement(622, 1375, 1376, 3644, 3643, 1369, 1370, 3614, 3613)
model.createElement(623, 1376, 1377, 3645, 3644, 1370, 1371, 3615, 3614)
model.createElement(624, 1377, 1378, 3646, 3645, 1371, 1372, 3616, 3615)
model.createElement(625, 1378, 280, 1169, 3646, 1372, 279, 1164, 3616)
model.createElement(626, 918, 3641, 3647, 917, 913, 3611, 3617, 912)
model.createElement(627, 3641, 3642, 3648, 3647, 3611, 3612, 3618, 3617)
model.createElement(628, 3642, 3643, 3649, 3648, 3612, 3613, 3619, 3618)
model.createElement(629, 3643, 3644, 3650, 3649, 3613, 3614, 3620, 3619)
model.createElement(630, 3644, 3645, 3651, 3650, 3614, 3615, 3621, 3620)
model.createElement(631, 3645, 3646, 3652, 3651, 3615, 3616, 3622, 3621)
model.createElement(632, 3646, 1169, 1170, 3652, 3616, 1164, 1165, 3622)
model.createElement(633, 917, 3647, 3653, 916, 912, 3617, 3623, 911)
model.createElement(634, 3647, 3648, 3654, 3653, 3617, 3618, 3624, 3623)
model.createElement(635, 3648, 3649, 3655, 3654, 3618, 3619, 3625, 3624)
model.createElement(636, 3649, 3650, 3656, 3655, 3619, 3620, 3626, 3625)
model.createElement(637, 3650, 3651, 3657, 3656, 3620, 3621, 3627, 3626)
model.createElement(638, 3651, 3652, 3658, 3657, 3621, 3622, 3628, 3627)
model.createElement(639, 3652, 1170, 1171, 3658, 3622, 1165, 1166, 3628)
model.createElement(640, 916, 3653, 3659, 915, 911, 3623, 3629, 910)
model.createElement(641, 3653, 3654, 3660, 3659, 3623, 3624, 3630, 3629)
model.createElement(642, 3654, 3655, 3661, 3660, 3624, 3625, 3631, 3630)
model.createElement(643, 3655, 3656, 3662, 3661, 3625, 3626, 3632, 3631)
model.createElement(644, 3656, 3657, 3663, 3662, 3626, 3627, 3633, 3632)
model.createElement(645, 3657, 3658, 3664, 3663, 3627, 3628, 3634, 3633)
model.createElement(646, 3658, 1171, 1172, 3664, 3628, 1166, 1167, 3634)
model.createElement(647, 915, 3659, 3665, 914, 910, 3629, 3635, 909)
model.createElement(648, 3659, 3660, 3666, 3665, 3629, 3630, 3636, 3635)
model.createElement(649, 3660, 3661, 3667, 3666, 3630, 3631, 3637, 3636)
model.createElement(650, 3661, 3662, 3668, 3667, 3631, 3632, 3638, 3637)
model.createElement(651, 3662, 3663, 3669, 3668, 3632, 3633, 3639, 3638)
model.createElement(652, 3663, 3664, 3670, 3669, 3633, 3634, 3640, 3639)
model.createElement(653, 3664, 1172, 1173, 3670, 3634, 1167, 1168, 3640)
model.createElement(654, 914, 3665, 1607, 71, 909, 3635, 1601, 72)
model.createElement(655, 3665, 3666, 1608, 1607, 3635, 3636, 1602, 1601)
model.createElement(656, 3666, 3667, 1609, 1608, 3636, 3637, 1603, 1602)
model.createElement(657, 3667, 3668, 1610, 1609, 3637, 3638, 1604, 1603)
model.createElement(658, 3668, 3669, 1611, 1610, 3638, 3639, 1605, 1604)
model.createElement(659, 3669, 3670, 1612, 1611, 3639, 3640, 1606, 1605)
model.createElement(660, 3670, 1173, 261, 1612, 3640, 1168, 262, 1606)
model.createElement(661, 148, 1379, 3671, 923, 149, 1373, 3641, 918)
model.createElement(662, 1379, 1380, 3672, 3671, 1373, 1374, 3642, 3641)
model.createElement(663, 1380, 1381, 3673, 3672, 1374, 1375, 3643, 3642)
model.createElement(664, 1381, 1382, 3674, 3673, 1375, 1376, 3644, 3643)
model.createElement(665, 1382, 1383, 3675, 3674, 1376, 1377, 3645, 3644)
model.createElement(666, 1383, 1384, 3676, 3675, 1377, 1378, 3646, 3645)
model.createElement(667, 1384, 281, 1174, 3676, 1378, 280, 1169, 3646)
model.createElement(668, 923, 3671, 3677, 922, 918, 3641, 3647, 917)
model.createElement(669, 3671, 3672, 3678, 3677, 3641, 3642, 3648, 3647)
model.createElement(670, 3672, 3673, 3679, 3678, 3642, 3643, 3649, 3648)
model.createElement(671, 3673, 3674, 3680, 3679, 3643, 3644, 3650, 3649)
model.createElement(672, 3674, 3675, 3681, 3680, 3644, 3645, 3651, 3650)
model.createElement(673, 3675, 3676, 3682, 3681, 3645, 3646, 3652, 3651)
model.createElement(674, 3676, 1174, 1175, 3682, 3646, 1169, 1170, 3652)
model.createElement(675, 922, 3677, 3683, 921, 917, 3647, 3653, 916)
model.createElement(676, 3677, 3678, 3684, 3683, 3647, 3648, 3654, 3653)
model.createElement(677, 3678, 3679, 3685, 3684, 3648, 3649, 3655, 3654)
model.createElement(678, 3679, 3680, 3686, 3685, 3649, 3650, 3656, 3655)
model.createElement(679, 3680, 3681, 3687, 3686, 3650, 3651, 3657, 3656)
model.createElement(680, 3681, 3682, 3688, 3687, 3651, 3652, 3658, 3657)
model.createElement(681, 3682, 1175, 1176, 3688, 3652, 1170, 1171, 3658)
model.createElement(682, 921, 3683, 3689, 920, 916, 3653, 3659, 915)
model.createElement(683, 3683, 3684, 3690, 3689, 3653, 3654, 3660, 3659)
model.createElement(684, 3684, 3685, 3691, 3690, 3654, 3655, 3661, 3660)
model.createElement(685, 3685, 3686, 3692, 3691, 3655, 3656, 3662, 3661)
model.createElement(686, 3686, 3687, 3693, 3692, 3656, 3657, 3663, 3662)
model.createElement(687, 3687, 3688, 3694, 3693, 3657, 3658, 3664, 3663)
model.createElement(688, 3688, 1176, 1177, 3694, 3658, 1171, 1172, 3664)
model.createElement(689, 920, 3689, 3695, 919, 915, 3659, 3665, 914)
model.createElement(690, 3689, 3690, 3696, 3695, 3659, 3660, 3666, 3665)
model.createElement(691, 3690, 3691, 3697, 3696, 3660, 3661, 3667, 3666)
model.createElement(692, 3691, 3692, 3698, 3697, 3661, 3662, 3668, 3667)
model.createElement(693, 3692, 3693, 3699, 3698, 3662, 3663, 3669, 3668)
model.createElement(694, 3693, 3694, 3700, 3699, 3663, 3664, 3670, 3669)
model.createElement(695, 3694, 1177, 1178, 3700, 3664, 1172, 1173, 3670)
model.createElement(696, 919, 3695, 1613, 70, 914, 3665, 1607, 71)
model.createElement(697, 3695, 3696, 1614, 1613, 3665, 3666, 1608, 1607)
model.createElement(698, 3696, 3697, 1615, 1614, 3666, 3667, 1609, 1608)
model.createElement(699, 3697, 3698, 1616, 1615, 3667, 3668, 1610, 1609)
model.createElement(700, 3698, 3699, 1617, 1616, 3668, 3669, 1611, 1610)
model.createElement(701, 3699, 3700, 1618, 1617, 3669, 3670, 1612, 1611)
model.createElement(702, 3700, 1178, 260, 1618, 3670, 1173, 261, 1612)
model.createElement(703, 147, 1385, 3701, 928, 148, 1379, 3671, 923)
model.createElement(704, 1385, 1386, 3702, 3701, 1379, 1380, 3672, 3671)
model.createElement(705, 1386, 1387, 3703, 3702, 1380, 1381, 3673, 3672)
model.createElement(706, 1387, 1388, 3704, 3703, 1381, 1382, 3674, 3673)
model.createElement(707, 1388, 1389, 3705, 3704, 1382, 1383, 3675, 3674)
model.createElement(708, 1389, 1390, 3706, 3705, 1383, 1384, 3676, 3675)
model.createElement(709, 1390, 282, 1179, 3706, 1384, 281, 1174, 3676)
model.createElement(710, 928, 3701, 3707, 927, 923, 3671, 3677, 922)
model.createElement(711, 3701, 3702, 3708, 3707, 3671, 3672, 3678, 3677)
model.createElement(712, 3702, 3703, 3709, 3708, 3672, 3673, 3679, 3678)
model.createElement(713, 3703, 3704, 3710, 3709, 3673, 3674, 3680, 3679)
model.createElement(714, 3704, 3705, 3711, 3710, 3674, 3675, 3681, 3680)
model.createElement(715, 3705, 3706, 3712, 3711, 3675, 3676, 3682, 3681)
model.createElement(716, 3706, 1179, 1180, 3712, 3676, 1174, 1175, 3682)
model.createElement(717, 927, 3707, 3713, 926, 922, 3677, 3683, 921)
model.createElement(718, 3707, 3708, 3714, 3713, 3677, 3678, 3684, 3683)
model.createElement(719, 3708, 3709, 3715, 3714, | |
to perform the operation on.
:param file input_file5: Fifth input file to perform the operation on.
:param file input_file6: Sixth input file to perform the operation on.
:param file input_file7: Seventh input file to perform the operation on.
:param file input_file8: Eighth input file to perform the operation on.
:param file input_file9: Ninth input file to perform the operation on.
:param file input_file10: Tenth input file to perform the operation on.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.merge_document_txt_multi_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
else:
(data) = self.merge_document_txt_multi_with_http_info(input_file1, input_file2, **kwargs) # noqa: E501
return data
def merge_document_txt_multi_with_http_info(self, input_file1, input_file2, **kwargs):  # noqa: E501
    """Merge Multple Text (TXT) Files Together  # noqa: E501

    Combine multiple Text (.TXT) files into a single text document, preserving the order of the input documents in the combined document by stacking them vertically.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.merge_document_txt_multi_with_http_info(input_file1, input_file2, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file1: First input file to perform the operation on. (required)
    :param file input_file2: Second input file to perform the operation on. (required)
    :param file input_file3: Third input file to perform the operation on.
    :param file input_file4: Fourth input file to perform the operation on.
    :param file input_file5: Fifth input file to perform the operation on.
    :param file input_file6: Sixth input file to perform the operation on.
    :param file input_file7: Seventh input file to perform the operation on.
    :param file input_file8: Eighth input file to perform the operation on.
    :param file input_file9: Ninth input file to perform the operation on.
    :param file input_file10: Tenth input file to perform the operation on.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-control keywords shared by every
    # generated API method.
    all_params = ['input_file1', 'input_file2', 'input_file3', 'input_file4', 'input_file5', 'input_file6', 'input_file7', 'input_file8', 'input_file9', 'input_file10']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() captures input_file1/input_file2/kwargs by name; the
    # string lookups below depend on those exact parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method merge_document_txt_multi" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'input_file1' is set
    if ('input_file1' not in params or
            params['input_file1'] is None):
        raise ValueError("Missing the required parameter `input_file1` when calling `merge_document_txt_multi`")  # noqa: E501
    # verify the required parameter 'input_file2' is set
    if ('input_file2' not in params or
            params['input_file2'] is None):
        raise ValueError("Missing the required parameter `input_file2` when calling `merge_document_txt_multi`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    # Files are posted as multipart form fields named inputFile1..inputFile10;
    # only files the caller actually supplied are attached.
    local_var_files = {}
    if 'input_file1' in params:
        local_var_files['inputFile1'] = params['input_file1']  # noqa: E501
    if 'input_file2' in params:
        local_var_files['inputFile2'] = params['input_file2']  # noqa: E501
    if 'input_file3' in params:
        local_var_files['inputFile3'] = params['input_file3']  # noqa: E501
    if 'input_file4' in params:
        local_var_files['inputFile4'] = params['input_file4']  # noqa: E501
    if 'input_file5' in params:
        local_var_files['inputFile5'] = params['input_file5']  # noqa: E501
    if 'input_file6' in params:
        local_var_files['inputFile6'] = params['input_file6']  # noqa: E501
    if 'input_file7' in params:
        local_var_files['inputFile7'] = params['input_file7']  # noqa: E501
    if 'input_file8' in params:
        local_var_files['inputFile8'] = params['input_file8']  # noqa: E501
    if 'input_file9' in params:
        local_var_files['inputFile9'] = params['input_file9']  # noqa: E501
    if 'input_file10' in params:
        local_var_files['inputFile10'] = params['input_file10']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/octet-stream'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Apikey']  # noqa: E501

    return self.api_client.call_api(
        '/convert/merge/txt/multi', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def merge_document_xlsx(self, input_file1, input_file2, **kwargs):  # noqa: E501
    """Merge Two Excel XLSX Together  # noqa: E501

    Combine two Office Excel spreadsheets (xlsx) into a single Office Excel spreadsheet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.merge_document_xlsx(input_file1, input_file2, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file1: First input file to perform the operation on. (required)
    :param file input_file2: Second input file to perform the operation on (more than 2 can be supplied). (required)
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # This public helper always unwraps the response down to the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand back the request thread untouched.
        return self.merge_document_xlsx_with_http_info(input_file1, input_file2, **kwargs)  # noqa: E501
    # Synchronous path: execute the call and return the payload directly.
    (data) = self.merge_document_xlsx_with_http_info(input_file1, input_file2, **kwargs)  # noqa: E501
    return data
def merge_document_xlsx_with_http_info(self, input_file1, input_file2, **kwargs):  # noqa: E501
    """Merge Two Excel XLSX Together  # noqa: E501

    Combine two Office Excel spreadsheets (xlsx) into a single Office Excel spreadsheet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.merge_document_xlsx_with_http_info(input_file1, input_file2, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file1: First input file to perform the operation on. (required)
    :param file input_file2: Second input file to perform the operation on (more than 2 can be supplied). (required)
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint parameters plus the client-control keywords shared by every
    # generated API method.
    all_params = ['input_file1', 'input_file2']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: locals() captures input_file1/input_file2/kwargs by name; the
    # string lookups below depend on those exact parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method merge_document_xlsx" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'input_file1' is set
    if ('input_file1' not in params or
            params['input_file1'] is None):
        raise ValueError("Missing the required parameter `input_file1` when calling `merge_document_xlsx`")  # noqa: E501
    # verify the required parameter 'input_file2' is set
    if ('input_file2' not in params or
            params['input_file2'] is None):
        raise ValueError("Missing the required parameter `input_file2` when calling `merge_document_xlsx`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    header_params = {}

    form_params = []
    # Files are posted as multipart form fields named inputFile1/inputFile2.
    local_var_files = {}
    if 'input_file1' in params:
        local_var_files['inputFile1'] = params['input_file1']  # noqa: E501
    if 'input_file2' in params:
        local_var_files['inputFile2'] = params['input_file2']  # noqa: E501

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/octet-stream'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['multipart/form-data'])  # noqa: E501

    # Authentication setting
    auth_settings = ['Apikey']  # noqa: E501

    return self.api_client.call_api(
        '/convert/merge/xlsx', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='str',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def merge_document_xlsx_multi(self, input_file1, input_file2, **kwargs):  # noqa: E501
    """Merge Multple Excel XLSX Together  # noqa: E501

    Combine multiple Office Excel spreadsheets (xlsx) into a single Office Excel spreadsheet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.merge_document_xlsx_multi(input_file1, input_file2, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param file input_file1: First input file to perform the operation on. (required)
    :param file input_file2: Second input file to perform the operation on. (required)
    :param file input_file3: Third input file to perform the operation on.
    :param file input_file4: Fourth input file to perform the operation on.
    :param file input_file5: Fifth input file to perform the operation on.
    :param file input_file6: Sixth input file to perform the operation on.
    :param file input_file7: Seventh input file to perform the operation on.
    :param file input_file8: Eighth input file to perform the operation on.
    :param file input_file9: Ninth input file to perform the operation on.
    :param file input_file10: Tenth input file to perform the operation on.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    # This public helper always unwraps the response down to the payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand back the request thread untouched.
        return self.merge_document_xlsx_multi_with_http_info(input_file1, input_file2, **kwargs)  # noqa: E501
    # Synchronous path: execute the call and return the payload directly.
    (data) = self.merge_document_xlsx_multi_with_http_info(input_file1, input_file2, **kwargs)  # noqa: E501
    return data
def merge_document_xlsx_multi_with_http_info(self, input_file1, input_file2, **kwargs): # noqa: E501
"""Merge Multple Excel XLSX Together # noqa: E501
Combine multiple Office Excel spreadsheets (xlsx) into a single Office Excel spreadsheet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP | |
this is a transient dependency
or not.
:param value: the new value for the transient property; ``True`` if this is a
transient dependency or ``False`` if not.
"""
self._transient = value
def derive_from(self, group: str, name: str, version: str) -> 'Dependency':
    """
    Create a new dependency based on this one.  The derived dependency keeps
    this dependency's location and scope but takes the given coordinates.

    :param group: the name of the group the dependency belongs to.
    :param name: the name of the dependency.
    :param version: the version (in semver form) of the dependency.
    :return: the derived dependency.
    """
    derived_content = dict(
        location=self._location,
        group=group,
        name=name,
        version=version,
        scope=self._scope
    )
    return Dependency(name, derived_content)
@property
def scope(self) -> Sequence[str]:
    """
    A read-only property holding the names of the tasks to which this
    dependency is scoped.

    :return: the tasks that the dependency applies to.
    """
    return self._scope
def applies_to(self, task: str) -> bool:
    """
    Tell whether this dependency is in scope for the named task.

    :param task: the name of the task to test.
    :return: ``True`` if the dependency applies to the specified task or
             ``False`` if not.
    """
    return task in self._scope
def format(self, format_pattern: str):
    """
    Fill in a format pattern using this dependency's details.  Three
    substitution variables are supported -- ``group``, ``name`` and
    ``version`` -- written with surrounding braces in the pattern.

    :param format_pattern: the format pattern to fill in.
    """
    details = {
        'group': self.group,
        'name': self._name,
        'version': self._version,
    }
    return format_pattern.format(**details)
def same_but_for_version(self, other: 'Dependency') -> bool:
    """
    Tell whether *other* refers to the same dependency as this one, but
    pinned at a different version.

    :param other: the dependency to compare against.
    :return: ``True`` if the dependencies are the same except for version number.
    """
    same_identity = self.id == other.id
    return same_identity and self._version != other._version
def __repr__(self):
    # Render the canonical "group:name:version" coordinate string.
    return self.format('{group}:{name}:{version}')
def __eq__(self, other):
    """Dependencies are equal when their coordinate strings match."""
    if isinstance(other, Dependency):
        return repr(self) == repr(other)
    return NotImplemented
def __ne__(self, other):
    # Delegate to ``==`` so the NotImplemented fallback stays in one place.
    return not self == other
class DependencyPathSet(object):
    """
    A set of local file paths provided by a resolved dependency.  Every set
    carries one primary file plus any number of keyed secondary files, which
    are also readable as attributes named after their keys.
    """
    def __init__(self, dependency: Dependency, primary_file: Path):
        """
        Create a path set.

        :param dependency: the dependency to whom this path set belongs.
        :param primary_file: the primary file of the path set.
        """
        self._dependency = dependency
        self._primary_path = primary_file
        self._secondary_paths: Dict[str, Path] = {}

    @property
    def dependency(self) -> Dependency:
        """
        A read-only property that returns the dependency to which this set of
        paths belongs.

        :return: the dependency we belong to.
        """
        return self._dependency

    @property
    def primary_path(self) -> Path:
        """
        A read-only property that returns the primary path of the dependency.

        :return: the dependency's primary file.
        """
        return self._primary_path

    def add_secondary_path(self, key: str, path: Path):
        """
        Register a secondary path under the given key.  Once stored, the path
        is also accessible by the key as an attribute name.

        :param key: the key by which the secondary path should be known.
        :param path: the secondary path to remember.
        """
        self._secondary_paths[key] = path

    def has_secondary_path(self, key: str) -> bool:
        """
        Tell whether a secondary path is registered under the given key.

        :param key: the key to test.
        :return: ``True`` if we have a secondary file known by the given key or
                 ``False`` if not.
        """
        return key in self._secondary_paths

    def __getattr__(self, key):
        # Only registered secondary paths are served dynamically; anything
        # else is a genuine missing attribute.
        if key in self._secondary_paths:
            return self._secondary_paths[key]
        raise AttributeError(f"'DependencyPathSet' object has no attribute '{key}'")
# Signature of a language-specific resolver: given a resolution context and a
# dependency, produce the dependency's path set (or None when unresolvable).
ResolveDependencyFunction = Callable[['DependencyContext', Dependency], Optional[DependencyPathSet]]
# Signature of a hook mapping a project configuration object to its
# distribution path, if it has one.
ProjectConfigToPathFunction = Callable[[Any], Optional[Path]]
class Language(object):
    """
    A wrapper around a language-support module.  Construction gives the
    module's ``define_language`` hook (when present) a chance to fill in this
    object's configuration, tasks and resolver callbacks.
    """
    def __init__(self, module, language: str):
        self.language = language
        self.configuration_class: Optional[Type] = None
        self.configuration_schema: Optional[SchemaValidator] = None
        self.tasks: Sequence[Task] = []
        self.resolver: Optional[ResolveDependencyFunction] = None
        self.project_as_dist_path: Optional[ProjectConfigToPathFunction] = None
        # Let the module describe itself by mutating this instance.
        define = getattr(module, 'define_language', None)
        if define:
            define(self)

    def get_task(self, name: str) -> Optional[Task]:
        """
        Look up one of our tasks by name.

        :param name: the name of the desired task.
        :return: the requested task or ``None`` when no task carries the name.
        """
        return find(self.tasks, lambda task: task.name == name)
class RemoteResolver(object):
    """
    Pairs a remote directory URL with a local cache directory so that remote
    file references can be resolved into local files.
    """
    def __init__(self, directory_url: str, directory_path: Path):
        """
        Create a resolver.

        :param directory_url: the parent URL where file assets may be found.
        :param directory_path: the local path, relative to the file cache, where
        remote files should be downloaded.
        """
        # Drop a single trailing slash so URL joins below stay clean.
        self._directory_url = directory_url[:-1] if directory_url.endswith('/') else directory_url
        self._directory_path = directory_path
        self._resolve_remotely = file_cache.resolve_file

    def resolve(self, file_name: str) -> Optional[Path]:
        """
        Resolve the named file against our remote directory, downloading it
        into the local cache if needed.

        :param file_name: the name of the desired file.
        :return: the absolute path to the local file or ``None`` if it doesn't exist.
        """
        remote_url = f'{self._directory_url}/{file_name}'
        local_path = self._directory_path / file_name
        return self._resolve_remotely(remote_url, local_path)
class Resolution(object):
    """
    Instances of this class represent the resolution of a dependency: the
    dependency itself together with the path set it resolved to.
    """
    def __init__(self, dependency: Dependency, path_set: DependencyPathSet):
        """
        A function to create an instance of the ``Resolution`` class.

        :param dependency: the dependency we are the resolution for.
        :param path_set: the path set the dependency resolved to.
        """
        self._dependency = dependency
        self._path_set = path_set

    @property
    def dependency(self):
        """
        A read-only property returning the dependency we resolve.
        """
        return self._dependency

    @property
    def path_set(self):
        """
        A read-only property returning the path set our dependency resolved to.
        """
        return self._path_set
class DependencyNode(object):
    """
    A node in a dependency tree.  Each node wraps a dependency and links both
    ways: to its parent and to its children (keyed by dependency id).
    """
    def __init__(self, dependency: Optional[Dependency] = None, parent: Optional['DependencyNode'] = None):
        """
        Create a node.

        :param dependency: the dependency this node represents.
        :param parent: the parent node of this node.
        """
        self._parent = parent
        self._dependency = dependency
        self._children: Dict[str, DependencyNode] = {}
        # Register ourselves with the parent to keep the tree doubly linked.
        if parent is not None:
            parent._children[dependency.id] = self

    @property
    def dependency(self) -> Dependency:
        """
        A read-only property that returns the dependency this node wraps.
        """
        return self._dependency

    @property
    def parent(self) -> 'DependencyNode':
        """
        A read-only property that returns the parent node of this node.
        """
        return self._parent

    def get_child(self, dependency: Dependency) -> 'DependencyNode':
        """
        Return the child node for the given dependency.

        :param dependency: the dependency to get the child node for.
        :return: the child node for the dependency.
        """
        return self._children[dependency.id]

    def remove_child(self, dependency: Dependency) -> 'DependencyNode':
        """
        Remove and return the child node for the given dependency.

        :param dependency: the dependency to remove the child node for.
        :return: the child node for the dependency.
        """
        return self._children.pop(dependency.id)

    def copy(self, parent: 'DependencyNode') -> 'DependencyNode':
        """
        Create a copy of the tree rooted at this node.

        :param parent: the parent for the copy.
        :return: the copy of this node.
        """
        clone = DependencyNode(self._dependency, parent)
        for child in self._children.values():
            child.copy(clone)
        return clone

    def traceback(self) -> str:
        """
        Produce a dump of the dependency parentage of the current node.

        :return: a multi-line string showing the dependency parentage to this node.
        """
        # Collect the chain bottom-up (excluding the root), then render it
        # top-down with increasing indentation.
        chain = []
        node = self
        while node.parent is not None:
            chain.append(repr(node.dependency))
            node = node.parent
        chain.reverse()
        rendered = [
            text if depth == 0 else _tree_spacer * (depth - 1) + _tree_leader + text
            for depth, text in enumerate(chain)
        ]
        return '\n'.join(rendered)

    def depth_first(self) -> Generator['DependencyNode', None, None]:
        """
        Iterate the tree rooted at this node depth-first (children are yielded
        before the node itself).

        :return: the next node.
        """
        for child in self._children.values():
            yield from child.depth_first()
        yield self
class ResolutionSet(object):
"""
Instances of this class represent a full set of resolved | |
<reponame>m-yuhas/interview_questions<filename>shuffle.py<gh_stars>0
#!/usr/bin/env python
from itertools import product
from random import randint
from sys import modules
from typing import Any, Callable, List
# matplotlib is an optional dependency, only needed for plotting bias
# matrices; everything else works without it.
try:
    from matplotlib import pyplot
except ImportError:
    # Previously a bare ``except:`` which also swallowed unrelated errors
    # (including KeyboardInterrupt); only a missing package should be ignored.
    pass
class Card(object):
    """A playing card.

    Arguments:
        suit: str
            The suit of the card
        rank: str
            The rank of the card
    """

    # Rank ordering used by the comparison operators; aces are high.  Ranks
    # are keyed by their string form so integer ranks work too.
    _RANK_ORDER = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
                   '9': 9, '10': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14}

    def __init__(self, suit: str, rank: str) -> None:
        self.suit = suit
        self.rank = rank

    @staticmethod
    def _suit_key(suit: str) -> int:
        # Suits are ordered by the big-endian integer value of their UTF-8
        # encoding (an arbitrary but total order over the suit symbols).
        return int.from_bytes(suit.encode(), byteorder='big')

    def __eq__(self, other: 'Card') -> bool:
        """Return True if this card is the same card as 'other'; otherwise
        return False.

        Raises:
            AttributeError: if 'other' has no suit/rank attributes
        """
        return self.suit == other.suit and self.rank == other.rank

    def __ne__(self, other: 'Card') -> bool:
        """Return False if this card is the same card as 'other'; otherwise
        return True.

        Raises:
            AttributeError: if 'other' has no suit/rank attributes
        """
        return not self.__eq__(other)

    def __lt__(self, other: 'Card') -> bool:
        """Return True if this card has a lesser value than the 'other' card.

        Suits are compared first (by encoded value); ranks break suit ties.

        Raises:
            AttributeError: if 'other' has no suit/rank attributes
            KeyError: if suits are equal and either rank is unknown
        """
        mine = Card._suit_key(self.suit)
        theirs = Card._suit_key(other.suit)
        if mine != theirs:
            return mine < theirs
        return Card._RANK_ORDER[str(self.rank)] < Card._RANK_ORDER[str(other.rank)]

    def __le__(self, other: 'Card') -> bool:
        """Return True if this card is equal to or below the 'other' card."""
        return self.__eq__(other) or self.__lt__(other)

    def __gt__(self, other: 'Card') -> bool:
        """Return True if this card is strictly above the 'other' card."""
        return not self.__le__(other)

    def __ge__(self, other: 'Card') -> bool:
        """Return True if this card is equal to or above the 'other' card."""
        return not self.__lt__(other)

    def __str__(self) -> str:
        """Return the rank immediately followed by the suit, e.g. 'K♡'."""
        return f'{self.rank}{self.suit}'
def shuffle(deck: List[Any]) -> List[Any]:
    """Shuffle a list of objects in O(n) time (Fisher-Yates, in place).

    Arguments:
        deck List[Any]
            The list of objects to shuffle
    Returns:
        List[Any]: the shuffled list
    """
    last = len(deck) - 1
    for left in range(last):
        # Pick uniformly from the not-yet-fixed suffix, including 'left'.
        pick = randint(left, last)
        deck[left], deck[pick] = deck[pick], deck[left]
    return deck
def get_order_bias(shuffle_algorithm: Callable[[List[Any]], List[Any]],
                   deck_size: int,
                   iterations: int) -> List[List[float]]:
    """Estimate the positional bias of a shuffle algorithm.

    The algorithm is run ``iterations`` times on the identity deck
    ``[0, 1, ..., deck_size - 1]`` and landing positions are tallied.

    Arguments:
        shuffle_algorithm: Callable[[List[Any]], List[Any]]
            Shuffle function that takes a list of objects and returns the
            shuffled list
        deck_size: int
            The size of the deck to use when analyzing the order bias
        iterations: int
            Number of shuffles to simulate
    Returns:
        List[List[float]]: a matrix where entry (x, y) is the estimated
        probability that the object at output position x originated at input
        position y; every row sums to 1.0 for an actual shuffle.
    """
    bias_matrix = [[0.0] * deck_size for _ in range(deck_size)]
    for _ in range(iterations):
        # Shuffling the identity deck means each value doubles as its own
        # original index.  (The inner loop previously reused 'i' from the
        # outer loop; distinct names avoid the shadowing.)
        shuffled_deck = shuffle_algorithm(list(range(deck_size)))
        for position in range(deck_size):
            bias_matrix[position][shuffled_deck[position]] += 1
    # Normalize counts into per-row probabilities.  Dividing by
    # deck_size * iterations, as before, produced rows summing to
    # 1 / deck_size rather than probabilities.
    for x in range(deck_size):
        for y in range(deck_size):
            bias_matrix[x][y] /= float(iterations)
    return bias_matrix
def unstable_sort(deck: List[Any]) -> List[Any]:
    """Sort a list of objects, assuming that <, ==, and > are implemented for
    the objects in the list.

    This function implements a heap sort, which has an average performance of
    O(nlog(n)) and a worst case performance of O(nlog(n)).  Its memory usage is
    O(n).  This sort is not stable so the order between identical objects in
    the input array is not preserved.

    Arguments:
        deck: List[Any]
            List of objects to be sorted
    Returns:
        List[Any]: sorted list of objects
    """
    # Build a max-heap by sifting each appended element up.  (The original
    # code special-cased even and odd indices with identical bodies; the
    # parent of node j is (j - 1) // 2 regardless of parity.)
    heap = []
    for value in deck:
        heap.append(value)
        j = len(heap) - 1
        while j > 0 and heap[(j - 1) // 2] < heap[j]:
            heap[j], heap[(j - 1) // 2] = heap[(j - 1) // 2], heap[j]
            j = (j - 1) // 2
    # Repeatedly move the current max to the end and sift the new root down
    # within the shrinking prefix heap[:end].
    for end in range(len(heap) - 1, 0, -1):
        heap[end], heap[0] = heap[0], heap[end]
        j = 0
        while True:
            left = 2 * j + 1
            right = 2 * j + 2
            if left >= end:
                break
            if right < end and heap[right] > heap[left] and heap[right] > heap[j]:
                heap[j], heap[right] = heap[right], heap[j]
                j = right
            elif heap[left] > heap[j]:
                heap[j], heap[left] = heap[left], heap[j]
                j = left
            else:
                break
    return heap
def stable_sort(deck: List[Any]) -> List[Any]:
    """Sort a list of objects, assuming <, ==, and > are implemented for the
    objects in the list.

    This function implements a merge sort, which has an average performance of
    O(nlog(n)) and a worst case performance of O(nlog(n)). Because the
    recursive calls operate on slice copies of the input, this implementation
    uses O(nlog(n)) memory. This sort is stable, so the order between
    identical objects in the input array is preserved.

    The list is sorted in place and also returned.

    Arguments:
        deck: List[Any]
            List of objects to be sorted

    Returns:
        List[Any]: sorted list of objects
    """
    if len(deck) > 1:
        mid = len(deck) // 2
        first = stable_sort(deck[:mid])
        last = stable_sort(deck[mid:])
        i = 0
        j = 0
        # Merge the two sorted halves back into deck.
        while i < len(first) or j < len(last):
            if i >= len(first):
                deck[i + j] = last[j]
                j += 1
            elif j >= len(last):
                deck[i + j] = first[i]
                i += 1
            elif last[j] < first[i]:
                # Only a strictly smaller right-half element may go first.
                deck[i + j] = last[j]
                j += 1
            else:
                # Ties are resolved in favor of the left half, which is what
                # makes the merge (and therefore the sort) stable.  The
                # previous version took from `last` on ties, breaking the
                # stability promised by the docstring.
                deck[i + j] = first[i]
                i += 1
    return deck
if __name__ == '__main__':
suits = ['♠', '♣', '♡', '♢']
ranks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', | |
bace_user_specified_features = [
'MW', 'AlogP', 'HBA', 'HBD', 'RB', 'HeavyAtomCount', 'ChiralCenterCount',
'ChiralCenterCountAllPossible', 'RingCount', 'PSA', 'Estate', 'MR', 'Polar',
'sLi_Key', 'ssBe_Key', 'ssssBem_Key', 'sBH2_Key', 'ssBH_Key', 'sssB_Key',
'ssssBm_Key', 'sCH3_Key', 'dCH2_Key', 'ssCH2_Key', 'tCH_Key', 'dsCH_Key',
'aaCH_Key', 'sssCH_Key', 'ddC_Key', 'tsC_Key', 'dssC_Key', 'aasC_Key',
'aaaC_Key', 'ssssC_Key', 'sNH3_Key', 'sNH2_Key', 'ssNH2_Key', 'dNH_Key',
'ssNH_Key', 'aaNH_Key', 'tN_Key', 'sssNH_Key', 'dsN_Key', 'aaN_Key',
'sssN_Key', 'ddsN_Key', 'aasN_Key', 'ssssN_Key', 'daaN_Key', 'sOH_Key',
'dO_Key', 'ssO_Key', 'aaO_Key', 'aOm_Key', 'sOm_Key', 'sF_Key', 'sSiH3_Key',
'ssSiH2_Key', 'sssSiH_Key', 'ssssSi_Key', 'sPH2_Key', 'ssPH_Key',
'sssP_Key', 'dsssP_Key', 'ddsP_Key', 'sssssP_Key', 'sSH_Key', 'dS_Key',
'ssS_Key', 'aaS_Key', 'dssS_Key', 'ddssS_Key', 'ssssssS_Key', 'Sm_Key',
'sCl_Key', 'sGeH3_Key', 'ssGeH2_Key', 'sssGeH_Key', 'ssssGe_Key',
'sAsH2_Key', 'ssAsH_Key', 'sssAs_Key', 'dsssAs_Key', 'ddsAs_Key',
'sssssAs_Key', 'sSeH_Key', 'dSe_Key', 'ssSe_Key', 'aaSe_Key', 'dssSe_Key',
'ssssssSe_Key', 'ddssSe_Key', 'sBr_Key', 'sSnH3_Key', 'ssSnH2_Key',
'sssSnH_Key', 'ssssSn_Key', 'sI_Key', 'sPbH3_Key', 'ssPbH2_Key',
'sssPbH_Key', 'ssssPb_Key', 'sLi_Cnt', 'ssBe_Cnt', 'ssssBem_Cnt',
'sBH2_Cnt', 'ssBH_Cnt', 'sssB_Cnt', 'ssssBm_Cnt', 'sCH3_Cnt', 'dCH2_Cnt',
'ssCH2_Cnt', 'tCH_Cnt', 'dsCH_Cnt', 'aaCH_Cnt', 'sssCH_Cnt', 'ddC_Cnt',
'tsC_Cnt', 'dssC_Cnt', 'aasC_Cnt', 'aaaC_Cnt', 'ssssC_Cnt', 'sNH3_Cnt',
'sNH2_Cnt', 'ssNH2_Cnt', 'dNH_Cnt', 'ssNH_Cnt', 'aaNH_Cnt', 'tN_Cnt',
'sssNH_Cnt', 'dsN_Cnt', 'aaN_Cnt', 'sssN_Cnt', 'ddsN_Cnt', 'aasN_Cnt',
'ssssN_Cnt', 'daaN_Cnt', 'sOH_Cnt', 'dO_Cnt', 'ssO_Cnt', 'aaO_Cnt',
'aOm_Cnt', 'sOm_Cnt', 'sF_Cnt', 'sSiH3_Cnt', 'ssSiH2_Cnt', 'sssSiH_Cnt',
'ssssSi_Cnt', 'sPH2_Cnt', 'ssPH_Cnt', 'sssP_Cnt', 'dsssP_Cnt', 'ddsP_Cnt',
'sssssP_Cnt', 'sSH_Cnt', 'dS_Cnt', 'ssS_Cnt', 'aaS_Cnt', 'dssS_Cnt',
'ddssS_Cnt', 'ssssssS_Cnt', 'Sm_Cnt', 'sCl_Cnt', 'sGeH3_Cnt', 'ssGeH2_Cnt',
'sssGeH_Cnt', 'ssssGe_Cnt', 'sAsH2_Cnt', 'ssAsH_Cnt', 'sssAs_Cnt',
'dsssAs_Cnt', 'ddsAs_Cnt', 'sssssAs_Cnt', 'sSeH_Cnt', 'dSe_Cnt', 'ssSe_Cnt',
'aaSe_Cnt', 'dssSe_Cnt', 'ssssssSe_Cnt', 'ddssSe_Cnt', 'sBr_Cnt',
'sSnH3_Cnt', 'ssSnH2_Cnt', 'sssSnH_Cnt', 'ssssSn_Cnt', 'sI_Cnt',
'sPbH3_Cnt', 'ssPbH2_Cnt', 'sssPbH_Cnt', 'ssssPb_Cnt', 'sLi_Sum',
'ssBe_Sum', 'ssssBem_Sum', 'sBH2_Sum', 'ssBH_Sum', 'sssB_Sum', 'ssssBm_Sum',
'sCH3_Sum', 'dCH2_Sum', 'ssCH2_Sum', 'tCH_Sum', 'dsCH_Sum', 'aaCH_Sum',
'sssCH_Sum', 'ddC_Sum', 'tsC_Sum', 'dssC_Sum', 'aasC_Sum', 'aaaC_Sum',
'ssssC_Sum', 'sNH3_Sum', 'sNH2_Sum', 'ssNH2_Sum', 'dNH_Sum', 'ssNH_Sum',
'aaNH_Sum', 'tN_Sum', 'sssNH_Sum', 'dsN_Sum', 'aaN_Sum', 'sssN_Sum',
'ddsN_Sum', 'aasN_Sum', 'ssssN_Sum', 'daaN_Sum', 'sOH_Sum', 'dO_Sum',
'ssO_Sum', 'aaO_Sum', 'aOm_Sum', 'sOm_Sum', 'sF_Sum', 'sSiH3_Sum',
'ssSiH2_Sum', 'sssSiH_Sum', 'ssssSi_Sum', 'sPH2_Sum', 'ssPH_Sum',
'sssP_Sum', 'dsssP_Sum', 'ddsP_Sum', 'sssssP_Sum', 'sSH_Sum', 'dS_Sum',
'ssS_Sum', 'aaS_Sum', 'dssS_Sum', 'ddssS_Sum', 'ssssssS_Sum', 'Sm_Sum',
'sCl_Sum', 'sGeH3_Sum', 'ssGeH2_Sum', 'sssGeH_Sum', 'ssssGe_Sum',
'sAsH2_Sum', 'ssAsH_Sum', 'sssAs_Sum', 'dsssAs_Sum', 'ddsAs_Sum',
'sssssAs_Sum', 'sSeH_Sum', 'dSe_Sum', 'ssSe_Sum', 'aaSe_Sum', 'dssSe_Sum',
'ssssssSe_Sum', 'ddssSe_Sum', 'sBr_Sum', 'sSnH3_Sum', 'ssSnH2_Sum',
'sssSnH_Sum', 'ssssSn_Sum', 'sI_Sum', 'sPbH3_Sum', 'ssPbH2_Sum',
'sssPbH_Sum', 'ssssPb_Sum', 'sLi_Avg', 'ssBe_Avg', 'ssssBem_Avg',
'sBH2_Avg', 'ssBH_Avg', 'sssB_Avg', 'ssssBm_Avg', 'sCH3_Avg', 'dCH2_Avg',
'ssCH2_Avg', 'tCH_Avg', 'dsCH_Avg', 'aaCH_Avg', 'sssCH_Avg', 'ddC_Avg',
'tsC_Avg', 'dssC_Avg', 'aasC_Avg', 'aaaC_Avg', 'ssssC_Avg', 'sNH3_Avg',
'sNH2_Avg', 'ssNH2_Avg', 'dNH_Avg', 'ssNH_Avg', 'aaNH_Avg', 'tN_Avg',
'sssNH_Avg', 'dsN_Avg', 'aaN_Avg', 'sssN_Avg', 'ddsN_Avg', 'aasN_Avg',
'ssssN_Avg', 'daaN_Avg', 'sOH_Avg', 'dO_Avg', 'ssO_Avg', 'aaO_Avg',
'aOm_Avg', 'sOm_Avg', 'sF_Avg', 'sSiH3_Avg', 'ssSiH2_Avg', 'sssSiH_Avg',
'ssssSi_Avg', 'sPH2_Avg', 'ssPH_Avg', 'sssP_Avg', 'dsssP_Avg', 'ddsP_Avg',
'sssssP_Avg', 'sSH_Avg', 'dS_Avg', 'ssS_Avg', 'aaS_Avg', 'dssS_Avg',
'ddssS_Avg', 'ssssssS_Avg', 'Sm_Avg', 'sCl_Avg', 'sGeH3_Avg', 'ssGeH2_Avg',
'sssGeH_Avg', 'ssssGe_Avg', 'sAsH2_Avg', 'ssAsH_Avg', 'sssAs_Avg',
'dsssAs_Avg', 'ddsAs_Avg', 'sssssAs_Avg', 'sSeH_Avg', 'dSe_Avg', 'ssSe_Avg',
'aaSe_Avg', 'dssSe_Avg', 'ssssssSe_Avg', 'ddssSe_Avg', 'sBr_Avg',
'sSnH3_Avg', 'ssSnH2_Avg', 'sssSnH_Avg', 'ssssSn_Avg', 'sI_Avg',
'sPbH3_Avg', 'ssPbH2_Avg', 'sssPbH_Avg', 'ssssPb_Avg', 'First Zagreb (ZM1)',
'First Zagreb index by valence vertex degrees (ZM1V)',
'Second Zagreb (ZM2)',
'Second Zagreb index by valence vertex degrees (ZM2V)', 'Polarity (Pol)',
'Narumi Simple Topological (NST)', 'Narumi Harmonic Topological (NHT)',
'Narumi Geometric Topological (NGT)', 'Total structure connectivity (TSC)',
'Wiener (W)', 'Mean Wiener (MW)', 'Xu (Xu)', 'Quadratic (QIndex)',
'Radial centric (RC)', 'Mean Square Distance Balaban (MSDB)',
'Superpendentic (SP)', 'Harary (Har)', 'Log of product of row sums (LPRS)',
'Pogliani (Pog)', 'Schultz Molecular Topological (SMT)',
'Schultz Molecular Topological by valence vertex degrees (SMTV)',
'Mean Distance Degree Deviation (MDDD)', 'Ramification (Ram)',
'Gutman Molecular Topological (GMT)',
'Gutman MTI by valence vertex degrees (GMTV)',
'Average vertex distance degree (AVDD)', 'Unipolarity (UP)',
'Centralization (CENT)', 'Variation (VAR)',
'Molecular electrotopological variation (MEV)',
'Maximal electrotopological positive variation (MEPV)',
'Maximal electrotopological negative variation (MENV)',
'Eccentric connectivity (ECCc)', 'Eccentricity (ECC)',
'Average eccentricity (AECC)', 'Eccentric (DECC)',
'Valence connectivity index chi-0 (vX0)',
'Valence connectivity index chi-1 (vX1)',
'Valence connectivity index chi-2 (vX2)',
'Valence connectivity index chi-3 (vX3)',
'Valence connectivity index chi-4 (vX4)',
'Valence connectivity index chi-5 (vX5)',
'Average valence connectivity index chi-0 (AvX0)',
'Average valence connectivity index chi-1 (AvX1)',
'Average valence connectivity index chi-2 (AvX2)',
'Average valence connectivity index chi-3 (AvX3)',
'Average valence connectivity index chi-4 (AvX4)',
'Average valence connectivity index chi-5 (AvX5)', 'Quasi Wiener (QW)',
'First Mohar (FM)', 'Second Mohar (SM)', 'Spanning tree number (STN)',
'Kier benzene-likeliness index (KBLI)',
'Topological charge index of order 1 (TCI1)',
'Topological charge index of order 2 (TCI2)',
'Topological charge index of order 3 (TCI3)',
'Topological charge index of order 4 (TCI4)',
'Topological charge index of order 5 (TCI5)',
'Topological charge index of order 6 (TCI6)',
'Topological charge index of order 7 (TCI7)',
'Topological charge index of order 8 (TCI8)',
'Topological charge index of order 9 (TCI9)',
'Topological charge index of order 10 (TCI10)',
'Mean topological charge index of order 1 (MTCI1)',
'Mean topological charge index of order 2 (MTCI2)',
'Mean topological charge index of order 3 (MTCI3)',
'Mean topological charge index of order 4 (MTCI4)',
'Mean topological charge index of order 5 (MTCI5)',
'Mean topological charge index of order 6 (MTCI6)',
'Mean topological charge index of order 7 (MTCI7)',
'Mean topological charge index of order 8 (MTCI8)',
'Mean topological charge index of order 9 (MTCI9)',
'Mean topological charge index of order 10 (MTCI10)',
'Global topological charge (GTC)', 'Hyper-distance-path index (HDPI)',
'Reciprocal hyper-distance-path index (RHDPI)',
'Square reciprocal distance sum (SRDS)',
'Modified Randic connectivity (MRC)', 'Balaban centric (BC)',
'Lopping centric (LC)', 'Kier Hall electronegativity (KHE)',
'Sum of topological distances between N..N (STD(N N))',
'Sum of topological distances between N..O (STD(N O))',
'Sum of topological distances between N..S (STD(N S))',
'Sum of topological distances between N..P (STD(N P))',
'Sum of topological distances between N..F (STD(N F))',
'Sum of topological distances between N..Cl (STD(N Cl))',
'Sum of topological distances between N..Br (STD(N Br))',
'Sum of topological distances between N..I (STD(N I))',
'Sum of topological distances between O..O (STD(O O))',
'Sum of topological distances between O..S (STD(O S))',
'Sum of topological distances between O..P (STD(O P))',
'Sum of topological distances between O..F (STD(O F))',
'Sum of topological distances between O..Cl (STD(O Cl))',
'Sum of topological distances between O..Br (STD(O Br))',
'Sum of topological distances between O..I (STD(O I))',
'Sum of topological distances between S..S (STD(S S))',
'Sum of topological distances between S..P (STD(S P))',
'Sum of topological distances between S..F (STD(S F))',
'Sum of topological distances between S..Cl (STD(S Cl))',
'Sum of topological distances between S..Br (STD(S Br))',
'Sum of topological distances between S..I (STD(S I))',
'Sum of topological distances between P..P (STD(P P))',
'Sum of topological distances between P..F (STD(P F))',
'Sum of topological distances between P..Cl (STD(P Cl))',
'Sum of topological distances between P..Br (STD(P Br))',
'Sum of topological distances between P..I (STD(P I))',
'Sum of topological distances between F..F (STD(F F))',
'Sum of topological distances between F..Cl (STD(F Cl))',
'Sum of topological distances between F..Br (STD(F Br))',
'Sum of topological distances between F..I (STD(F I))',
'Sum of topological distances between Cl..Cl (STD(Cl Cl))',
'Sum of topological distances between Cl..Br (STD(Cl Br))',
'Sum of topological distances between Cl..I (STD(Cl I))',
'Sum of topological distances between Br..Br (STD(Br Br))',
'Sum of topological distances between Br..I (STD(Br I))',
'Sum of topological distances between I..I (STD(I I))',
'Wiener-type index from Z weighted distance matrix - Barysz matrix (WhetZ)',
'Wiener-type index from electronegativity weighted distance matrix (Whete)',
'Wiener-type index from mass weighted distance matrix (Whetm)',
'Wiener-type index from van der waals weighted distance matrix (Whetv)',
'Wiener-type index from polarizability weighted distance matrix (Whetp)',
'Balaban-type index from Z weighted distance matrix - Barysz matrix (JhetZ)',
'Balaban-type index from electronegativity weighted distance matrix (Jhete)',
'Balaban-type index from mass weighted distance matrix (Jhetm)',
'Balaban-type index from van der waals weighted distance matrix (Jhetv)',
'Balaban-type index from polarizability weighted distance matrix (Jhetp)',
'Topological diameter (TD)', 'Topological radius (TR)',
'Petitjean 2D shape (PJ2DS)', 'Balaban distance connectivity index (J)',
'Solvation connectivity index chi-0 (SCIX0)',
'Solvation connectivity index chi-1 (SCIX1)',
'Solvation connectivity index chi-2 (SCIX2)',
'Solvation connectivity index chi-3 (SCIX3)',
'Solvation connectivity index chi-4 (SCIX4)',
'Solvation connectivity index chi-5 (SCIX5)',
'Connectivity index chi-0 (CIX0)',
'Connectivity chi-1 [Randic connectivity] (CIX1)',
'Connectivity index chi-2 (CIX2)', 'Connectivity index chi-3 (CIX3)',
'Connectivity index chi-4 (CIX4)', 'Connectivity index chi-5 (CIX5)',
'Average connectivity index chi-0 (ACIX0)',
'Average connectivity index chi-1 (ACIX1)',
'Average connectivity index chi-2 (ACIX2)',
'Average connectivity index chi-3 (ACIX3)',
'Average connectivity index chi-4 (ACIX4)',
'Average connectivity index chi-5 (ACIX5)',
'reciprocal distance Randic-type index (RDR)',
'reciprocal distance square Randic-type index (RDSR)',
'1-path Kier alpha-modified shape index (KAMS1)',
'2-path Kier alpha-modified shape index (KAMS2)',
'3-path Kier alpha-modified shape index (KAMS3)', 'Kier flexibility (KF)',
'path/walk 2 - Randic | |
<filename>pongAI.py
#PONG pygame
import random
import pygame, sys
from pygame.locals import *
import pong_config
pygame.init()
fps = pygame.time.Clock()
#colors
WHITE = (255,255,255)
RED = (255,0,0)
GREEN = (0,255,0)
BLACK = (0,0,0)
#globals
# Geometry and ball count come from pong_config so this script and the
# training-side code agree on the field layout.
WIDTH = pong_config.WIDTH
HEIGHT = pong_config.HEIGHT
BALL_RADIUS = pong_config.BALL_RADIUS
PAD_WIDTH = pong_config.PAD_WIDTH
PAD_HEIGHT = pong_config.PAD_HEIGHT
PAD_SPACE = pong_config.PAD_SPACE  # horizontal gap between a side's two pads
HALF_PAD_WIDTH = PAD_WIDTH // 2
HALF_PAD_HEIGHT = PAD_HEIGHT // 2
ball_num = pong_config.ball_num  # number of simultaneous balls in play
colorlist = [RED] * ball_num  # per-ball draw color; re-randomized on respawn
# NOTE(review): `[[0,0]] * ball_num` makes every slot alias ONE shared list.
# This is only safe because ball_init() replaces each slot with a fresh list
# before the in-place `+=` updates in draw() run.
ball_pos = [[0,0]] * ball_num
ball_vel = [[0,0]] * ball_num
paddle1_vel = [0, 0]  # vertical speeds of the left side's [outer, inner] pads
paddle2_vel = [0, 0]  # vertical speeds of the right side's [outer, inner] pads
paddle1_pos = [[0, 0], [0, 0]]  # [x, y] of each left pad; set by init()
paddle2_pos = [[0, 0], [0, 0]]  # [x, y] of each right pad; set by init()
l_score = 0
r_score = 0
horz = 2  # serve velocity components; cycled deterministically by ball_init()
vert = -3
defense = 0  # serve-direction flag: 0 means ball_init() serves leftward
#canvas declaration
window = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
pygame.display.set_caption('Hello World')  # placeholder window title
reward = 0  # cumulative RL reward; shaped in draw() and shown on screen
# helper that respawns ball `id` at the center of the field.
# Mutates the module-level ball_pos / ball_vel / colorlist entries in place
# and cycles the shared serve-speed globals (horz, vert); returns nothing.
def ball_init(id):
    global ball_pos, ball_vel, vert, horz, defense # these are vectors stored as lists
    ball_pos[id] = [WIDTH//2,HEIGHT//2]
    # horz = random.randrange(2,4)
    # vert = random.randrange(-3,3)
    # Deterministic serve speeds: horz cycles 3, 4, 2, 3, ...  `vert` is
    # derived from the *updated* horz, so the order of these two lines matters.
    horz = horz + 1 if horz != 4 else 2
    vert = horz + 1 if horz != 3 else -3
    if vert == 0:
        vert = vert + 1  # avoid a perfectly horizontal serve
    # Give the respawned ball a fresh random color.
    colorlist[id] = (random.randrange(0,255),random.randrange(0,255),random.randrange(0,255))
    if defense == 0:
        horz = - horz  # serve toward the left (AI-controlled) side
        # self.defense = 1 if self.defense == 0 else 0
    ball_vel[id] = [horz,-vert]
# define event handlers
def init():
    """Reset the game: place all four paddles at their start columns, zero
    both scores, and respawn every ball at the center of the field.

    Mutates module-level state only; returns nothing.
    """
    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel, l_score, r_score
    # (Removed the dead `global score1, score2` declaration: those names are
    # never defined or used anywhere in this file; the real scores are
    # l_score / r_score.)
    # Each side has two pads: index 0 sits on the gutter line, index 1 sits
    # PAD_SPACE further toward the middle of the field.
    paddle1_pos[0] = [HALF_PAD_WIDTH - 1, HEIGHT // 2]
    paddle2_pos[0] = [WIDTH + 1 - HALF_PAD_WIDTH, HEIGHT // 2]
    paddle1_pos[1] = [HALF_PAD_WIDTH - 1 + PAD_SPACE, HEIGHT // 2]
    paddle2_pos[1] = [WIDTH + 1 - HALF_PAD_WIDTH - PAD_SPACE, HEIGHT // 2]
    l_score = 0
    r_score = 0
    for i in range(ball_num):
        ball_init(i)
#draw function of canvas
def draw(canvas):
    """Advance the game by one frame and render it.

    Moves the four paddles (clamped to the screen), advances every ball,
    resolves wall / paddle / gutter collisions, updates the global scores and
    the RL reward, and draws the field, paddles, balls and score text onto
    `canvas`.
    """
    global paddle1_pos, paddle2_pos, ball_pos, ball_vel, l_score, r_score
    global reward
    canvas.fill(BLACK)
    # Static field markings: center line, the two gutter lines, center circle.
    pygame.draw.line(canvas, WHITE, [WIDTH // 2, 0],[WIDTH // 2, HEIGHT], 1)
    pygame.draw.line(canvas, WHITE, [PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1)
    pygame.draw.line(canvas, WHITE, [WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1)
    pygame.draw.circle(canvas, WHITE, [WIDTH//2, HEIGHT//2], 70, 1)
    # update paddle's vertical position, keep paddle on the screen:
    # inside the middle band a pad moves freely; at an edge it may only move
    # back toward the field (pygame's y axis grows downward, so vel > 0 at
    # the top edge and vel < 0 at the bottom edge point inward).
    if paddle1_pos[0][1] > HALF_PAD_HEIGHT and paddle1_pos[0][1] < HEIGHT - HALF_PAD_HEIGHT:
        paddle1_pos[0][1] += paddle1_vel[0]
    elif paddle1_pos[0][1] <= HALF_PAD_HEIGHT and paddle1_vel[0] > 0:
        paddle1_pos[0][1] += paddle1_vel[0]
    elif paddle1_pos[0][1] >= HEIGHT - HALF_PAD_HEIGHT and paddle1_vel[0] < 0:
        paddle1_pos[0][1] += paddle1_vel[0]
    if paddle1_pos[1][1] > HALF_PAD_HEIGHT and paddle1_pos[1][1] < HEIGHT - HALF_PAD_HEIGHT:
        paddle1_pos[1][1] += paddle1_vel[1]
    elif paddle1_pos[1][1] <= HALF_PAD_HEIGHT and paddle1_vel[1] > 0:
        paddle1_pos[1][1] += paddle1_vel[1]
    elif paddle1_pos[1][1] >= HEIGHT - HALF_PAD_HEIGHT and paddle1_vel[1] < 0:
        paddle1_pos[1][1] += paddle1_vel[1]
    if paddle2_pos[0][1] > HALF_PAD_HEIGHT and paddle2_pos[0][1] < HEIGHT - HALF_PAD_HEIGHT:
        paddle2_pos[0][1] += paddle2_vel[0]
    elif paddle2_pos[0][1] <= HALF_PAD_HEIGHT and paddle2_vel[0] > 0:
        paddle2_pos[0][1] += paddle2_vel[0]
    elif paddle2_pos[0][1] >= HEIGHT - HALF_PAD_HEIGHT and paddle2_vel[0] < 0:
        paddle2_pos[0][1] += paddle2_vel[0]
    if paddle2_pos[1][1] > HALF_PAD_HEIGHT and paddle2_pos[1][1] < HEIGHT - HALF_PAD_HEIGHT:
        paddle2_pos[1][1] += paddle2_vel[1]
    elif paddle2_pos[1][1] <= HALF_PAD_HEIGHT and paddle2_vel[1] > 0:
        paddle2_pos[1][1] += paddle2_vel[1]
    elif paddle2_pos[1][1] >= HEIGHT - HALF_PAD_HEIGHT and paddle2_vel[1] < 0:
        paddle2_pos[1][1] += paddle2_vel[1]
    #update ball
    for i in range(ball_num):
        ball_pos[i][0] += int(ball_vel[i][0])
        ball_pos[i][1] += int(ball_vel[i][1])
    #draw paddles and ball
    # pygame.draw.circle(canvas, RED, ball_pos, 20, 0)
    # Balls are drawn as hollow squares in each ball's current color.
    for i in range(ball_num):
        pygame.draw.rect(canvas, colorlist[i], [ball_pos[i][0]-BALL_RADIUS, ball_pos[i][1]-BALL_RADIUS, BALL_RADIUS * 2, BALL_RADIUS * 2], 2)
    pygame.draw.polygon(canvas, GREEN, [[paddle1_pos[0][0] - HALF_PAD_WIDTH, paddle1_pos[0][1] - HALF_PAD_HEIGHT], [paddle1_pos[0][0] - HALF_PAD_WIDTH, paddle1_pos[0][1] + HALF_PAD_HEIGHT], [paddle1_pos[0][0] + HALF_PAD_WIDTH, paddle1_pos[0][1] + HALF_PAD_HEIGHT], [paddle1_pos[0][0] + HALF_PAD_WIDTH, paddle1_pos[0][1] - HALF_PAD_HEIGHT]], 0)
    pygame.draw.polygon(canvas, GREEN, [[paddle2_pos[0][0] - HALF_PAD_WIDTH, paddle2_pos[0][1] - HALF_PAD_HEIGHT], [paddle2_pos[0][0] - HALF_PAD_WIDTH, paddle2_pos[0][1] + HALF_PAD_HEIGHT], [paddle2_pos[0][0] + HALF_PAD_WIDTH, paddle2_pos[0][1] + HALF_PAD_HEIGHT], [paddle2_pos[0][0] + HALF_PAD_WIDTH, paddle2_pos[0][1] - HALF_PAD_HEIGHT]], 0)
    pygame.draw.polygon(canvas, GREEN, [[paddle1_pos[1][0] - HALF_PAD_WIDTH, paddle1_pos[1][1] - HALF_PAD_HEIGHT], [paddle1_pos[1][0] - HALF_PAD_WIDTH, paddle1_pos[1][1] + HALF_PAD_HEIGHT], [paddle1_pos[1][0] + HALF_PAD_WIDTH, paddle1_pos[1][1] + HALF_PAD_HEIGHT], [paddle1_pos[1][0] + HALF_PAD_WIDTH, paddle1_pos[1][1] - HALF_PAD_HEIGHT]], 0)
    pygame.draw.polygon(canvas, GREEN, [[paddle2_pos[1][0] - HALF_PAD_WIDTH, paddle2_pos[1][1] - HALF_PAD_HEIGHT], [paddle2_pos[1][0] - HALF_PAD_WIDTH, paddle2_pos[1][1] + HALF_PAD_HEIGHT], [paddle2_pos[1][0] + HALF_PAD_WIDTH, paddle2_pos[1][1] + HALF_PAD_HEIGHT], [paddle2_pos[1][0] + HALF_PAD_WIDTH, paddle2_pos[1][1] - HALF_PAD_HEIGHT]], 0)
    #ball collision check on top and bottom walls
    for i in range(ball_num):
        if int(ball_pos[i][1]) <= BALL_RADIUS:
            ball_vel[i][1] = - ball_vel[i][1]
        if int(ball_pos[i][1]) >= HEIGHT + 1 - BALL_RADIUS:
            ball_vel[i][1] = -ball_vel[i][1]
    #ball collison check on gutters or paddles
    # Left (AI-controlled) side.  Reward shaping: a bounce off a left pad's
    # left face costs reward, a bounce off its right face earns reward, and
    # conceding a point costs a flat 10.
    for i in range(ball_num):
        for q in range(2):
            # Ball about to cross the pad's left face while overlapping it
            # vertically -> reflect leftward and speed up by 20%.
            if int(ball_pos[i][0]) + BALL_RADIUS + ball_vel[i][0] >= paddle1_pos[q][0] - PAD_WIDTH and int(ball_pos[i][0]) <= paddle1_pos[q][0] - PAD_WIDTH and\
               int(ball_pos[i][1]) >= paddle1_pos[q][1] - HALF_PAD_HEIGHT - BALL_RADIUS and int(ball_pos[i][1]) <= paddle1_pos[q][1] + HALF_PAD_HEIGHT:
                ball_vel[i][0] = -abs(ball_vel[i][0])
                ball_vel[i][0] *= 1.2
                ball_vel[i][1] *= 1.2
                reward -= 5 + abs(ball_vel[i][0])
            # Ball about to cross the pad's right face (a successful return
            # by the AI) -> reflect rightward and speed up by 20%.
            elif int(ball_pos[i][0]) - BALL_RADIUS + ball_vel[i][0] <= paddle1_pos[q][0] + PAD_WIDTH and int(ball_pos[i][0]) >= paddle1_pos[q][0] + PAD_WIDTH and\
                 int(ball_pos[i][1]) >= paddle1_pos[q][1] - HALF_PAD_HEIGHT - BALL_RADIUS and int(ball_pos[i][1]) <= paddle1_pos[q][1] + HALF_PAD_HEIGHT:
                ball_vel[i][0] = abs(ball_vel[i][0])
                ball_vel[i][0] *= 1.2
                ball_vel[i][1] *= 1.2
                reward += 5 + abs(ball_vel[i][0])
            # Ball reached the left gutter: right player scores.  ball_init()
            # recenters the ball, so the q == 1 pass cannot score it again.
            elif int(ball_pos[i][0]) <= BALL_RADIUS + PAD_WIDTH:
                r_score += 1
                reward -= 10
                ball_init(i)
    # Right side.  The vertical-overlap tests are commented out and replaced
    # with random checks, so this side returns the ball with a fixed
    # probability regardless of pad position — presumably an intentionally
    # imperfect scripted opponent.  NOTE(review): confirm this is intended.
    for i in range(ball_num):
        for q in range(2):
            if int(ball_pos[i][0]) + BALL_RADIUS + ball_vel[i][0] >= paddle2_pos[q][0] - PAD_WIDTH and ball_pos[i][0] <= paddle2_pos[q][0] - PAD_WIDTH and\
               random.randrange(1, 20) < 19:
                # int(ball_pos[i][1]) >= paddle2_pos[q][1] - HALF_PAD_HEIGHT - BALL_RADIUS and int(ball_pos[i][1]) <= paddle2_pos[q][1] + HALF_PAD_HEIGHT:
                ball_vel[i][0] = -(ball_vel[i][0])
                ball_vel[i][0] *= 1.2
                ball_vel[i][1] *= 1.2
            elif int(ball_pos[i][0]) - BALL_RADIUS + ball_vel[i][0] <= paddle2_pos[q][0] + PAD_WIDTH and ball_pos[i][0] >= paddle2_pos[q][0] + PAD_WIDTH and\
                 random.randrange(1, 20) < 9:
                # int(ball_pos[i][1]) >= paddle2_pos[q][1] - HALF_PAD_HEIGHT - BALL_RADIUS and int(ball_pos[i][1]) <= paddle2_pos[q][1] + HALF_PAD_HEIGHT:
                ball_vel[i][0] = abs(ball_vel[i][0])
                ball_vel[i][0] *= 1.2
                ball_vel[i][1] *= 1.2
            # Ball reached the right gutter: left (AI) player scores.
            elif int(ball_pos[i][0]) >= WIDTH + 1 - BALL_RADIUS - PAD_WIDTH:
                l_score += 1
                ball_init(i)
    #update scores
    myfont1 = pygame.font.SysFont("Comic Sans MS", 20)
    label1 = myfont1.render("Score "+str(l_score), 1, (255,255,0))
    canvas.blit(label1, (10,20))
    myfont2 = pygame.font.SysFont("Comic Sans MS", 20)
    label2 = myfont2.render("Score "+str(r_score), 1, (255,255,0))
    canvas.blit(label2, (100, 20))
    myfont3 = pygame.font.SysFont("Comic Sans MS", 20)
    # note: label2 is reused here for the reward readout
    label2 = myfont3.render("reward "+str(int(reward)), 1, (255,255,0))
    canvas.blit(label2, (190, 20))
#keydown handler
def keydown(event):
    """Map key presses to paddle velocities for the human-controlled (right)
    side: arrow keys drive the outer pad, U/J drive the inner pad."""
    global paddle1_vel, paddle2_vel
    # (pad index, velocity) per key; the left side is AI-driven, so the old
    # W/S/Y/H bindings stay disabled.
    bindings = {K_UP: (0, -8), K_DOWN: (0, 8), K_u: (1, -8), K_j: (1, 8)}
    if event.key in bindings:
        pad, vel = bindings[event.key]
        paddle2_vel[pad] = vel
#keyup handler
def keyup(event):
    """Stop the corresponding right-side pad when its control key is
    released."""
    global paddle1_vel, paddle2_vel
    if event.key in (K_w, K_s):
        # Left paddle is AI-driven; its manual controls are disabled.
        pass
    else:
        pad_for_key = {K_UP: 0, K_DOWN: 0, K_u: 1, K_j: 1}
        if event.key in pad_for_key:
            paddle2_vel[pad_for_key[event.key]] = 0
init()  # set up paddles, scores and balls once, before the game loop starts
#### PPO begin
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal  # NOTE(review): unused here (continuous-action leftover?)
from torch.distributions import Categorical
# Prefer GPU inference when CUDA is present; otherwise stay on the CPU.
device = torch.device('cpu')
if(torch.cuda.is_available()):
    device = torch.device('cuda:0')
    torch.cuda.empty_cache()  # start from a clean CUDA allocator state
class RolloutBuffer:
    """Per-batch storage for PPO rollouts: parallel lists of actions, states,
    log-probabilities, rewards, and episode-termination flags."""

    def __init__(self):
        self.actions = []
        self.states = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []

    def clear(self):
        """Empty all five lists in place (list identities are preserved)."""
        for buf in (self.actions, self.states, self.logprobs,
                    self.rewards, self.is_terminals):
            del buf[:]
class ActorCritic(nn.Module):
    """Actor-critic network pair for discrete-action PPO.

    The actor maps a state vector to a softmax distribution over
    ``action_dim`` discrete actions (wrapped in quantization stubs so the
    policy can later be post-training quantized); the critic maps a state to
    a single scalar value.  ``action_std_init`` is accepted for signature
    compatibility but unused by this discrete (Categorical) policy.
    """

    def __init__(self, state_dim, action_dim, action_std_init):
        super().__init__()
        hidden = 64
        # actor: input layer + three 64-wide hidden layers -> action probs
        actor_layers = [torch.quantization.QuantStub(),
                        nn.Linear(state_dim, hidden), nn.ReLU()]
        for _ in range(3):
            actor_layers.extend([nn.Linear(hidden, hidden), nn.ReLU()])
        actor_layers.extend([nn.Linear(hidden, action_dim),
                             torch.quantization.DeQuantStub(),
                             nn.Softmax(dim=-1)])
        self.actor = nn.Sequential(*actor_layers)
        # critic: same trunk shape, but a single unbounded value output
        critic_layers = [nn.Linear(state_dim, hidden), nn.ReLU()]
        for _ in range(3):
            critic_layers.extend([nn.Linear(hidden, hidden), nn.ReLU()])
        critic_layers.append(nn.Linear(hidden, 1))
        self.critic = nn.Sequential(*critic_layers)

    def act(self, state):
        """Sample an action from the current policy for ``state``.

        Returns ``(action, log_prob)`` as detached tensors.
        """
        dist = Categorical(self.actor(state))
        action = dist.sample()
        return action.detach(), dist.log_prob(action).detach()
class PPO:
    """Inference-side PPO agent: holds a live policy, a frozen copy used for
    action selection, and a rollout buffer that records each transition.

    The learning-rate arguments are accepted for compatibility with the
    training-side constructor but no optimizer is built here — this wrapper
    only selects actions and loads pretrained weights.
    """

    def __init__(self, state_dim, action_dim, lr_actor, lr_critic, gamma,
                 K_epochs, eps_clip, action_std_init=0.6):
        self.gamma = gamma
        self.eps_clip = eps_clip
        self.K_epochs = K_epochs
        self.buffer = RolloutBuffer()
        # Two identical networks: `policy` is the trainable one, `policy_old`
        # is synchronized to it and used for (frozen) action sampling.
        self.policy = ActorCritic(state_dim, action_dim, action_std_init).to(device)
        self.policy_old = ActorCritic(state_dim, action_dim, action_std_init).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())

    def select_action(self, state):
        """Sample an action for ``state`` from the frozen policy, record the
        transition in the rollout buffer, and return the action index as an
        int."""
        with torch.no_grad():
            state_t = torch.FloatTensor(state).to(device)
            action, logprob = self.policy_old.act(state_t)
        self.buffer.states.append(state_t)
        self.buffer.actions.append(action)
        self.buffer.logprobs.append(logprob)
        return action.item()

    def load(self, checkpoint_path):
        """Load pretrained weights into both the live and the frozen
        policy."""
        weights = torch.load(checkpoint_path,
                             map_location=lambda storage, loc: storage)
        self.policy_old.load_state_dict(weights)
        self.policy.load_state_dict(weights)
# Inference-only agent: the learning hyper-parameters (both learning rates,
# gamma, K_epochs, eps_clip) are all zeroed and action_std_init is None,
# because this script only loads a pretrained policy and never trains.
# State layout: 4 values per ball (x, y, vx, vy) plus the two left-pad
# positions (4 values); 4 discrete actions.
ppo_agent = PPO(ball_num * 4 + 4, 4, 0, 0, 0, 0, 0, None)
import os  # NOTE(review): os appears unused in this file
checkpoint_path = "PPO_pong_game_16000_1.pth"
# checkpoint_path = "./PPO_preTrained/pong_game/PPO_pong_game_1798_0.pth"
ppo_agent.load(checkpoint_path)
#### PPO end
#game loop
while True:
state = []
for i in range(ball_num):
state.append(ball_pos[i][0])
state.append(ball_pos[i][1])
state.append(ball_vel[i][0])
state.append(ball_vel[i][1])
state.append(paddle1_pos[0][0])
state.append(paddle1_pos[0][1])
state.append(paddle1_pos[1][0])
state.append(paddle1_pos[1][1])
action = ppo_agent.select_action(state)
action += 1
# print(state)
if action == 1:
paddle1_vel[0] = 8
elif action == 2:
paddle1_vel[0] = -8
elif action == 3:
paddle1_vel[1] = 8
elif action == 4:
paddle1_vel[1] = -8
| |
<reponame>jarmovanlenthe/imagemounter<gh_stars>0
#!/usr/bin/env python
#
# This CLI is a total mess. If you want a simple example, please refer to simple_cli.py
#
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import glob
import logging
import sys
import os
from imagemounter import _util, ImageParser, Unmounter, __version__, FILE_SYSTEM_TYPES, VOLUME_SYSTEM_TYPES, \
DISK_MOUNTERS
from imagemounter.cli import CheckAction, get_coloring_func, AppendDictAction, ImageMounterStreamHandler
from imagemounter.exceptions import NoRootFoundError, ImageMounterError, UnsupportedFilesystemError
# Python 2 compatibility: on Python 2, input() evaluates what the user types,
# so alias it to raw_input(); on Python 3, raw_input does not exist and the
# NameError is deliberately swallowed, leaving the safe builtin input() as-is.
try:
    input = raw_input
except NameError:
    pass
def main():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: {0}\n'.format(message))
self.print_help()
sys.exit(2)
parser = MyParser(description='Utility to mount volumes in Encase and dd images locally.')
parser.add_argument('images', nargs='*',
help='path(s) to the image(s) that you want to mount; generally just the first file (e.g. '
'the .E01 or .001 file) or the folder containing the files is enough in the case of '
'split files')
# Special options
parser.add_argument('--version', action='version', version=__version__, help='display version and exit')
parser.add_argument('--check', action=CheckAction, nargs=0,
help='do a system check and list which tools are installed')
parser.add_argument('-i', '--interactive', action='store_true', default=False,
help='enter the interactive shell')
# Utility specific
parser.add_argument('-u', '--unmount', action='store_true', default=False,
help='try to unmount left-overs of previous imount runs; may occasionally not be able to '
'detect all mountpoints or detect too much mountpoints; use --casename to limit '
'the unmount options')
parser.add_argument('-w', '--wait', action='store_true', default=False, help='pause on some additional warnings')
parser.add_argument('-k', '--keep', action='store_true', default=False,
help='keep volumes mounted after program exits')
parser.add_argument('--no-interaction', action='store_true', default=False,
help="do not ask for any user input, implies --keep")
parser.add_argument('-o', '--only-mount', default=None,
help="specify which volume(s) you want to mount, comma-separated")
parser.add_argument('--skip', default=None,
help="specify which volume(s) you do not want to mount, comma-separated")
parser.add_argument('-v', '--verbose', action='count', default=False, help='enable verbose output')
parser.add_argument('-c', '--color', action='store_true', default=False, help='force colorizing the output')
parser.add_argument('--no-color', action='store_true', default=False, help='prevent colorizing the output')
# Additional options
parser.add_argument('-r', '--reconstruct', action='store_true', default=False,
help='attempt to reconstruct the full filesystem tree; implies -s and mounts all partitions '
'at once')
parser.add_argument('--carve', action='store_true', default=False,
help='automatically carve the free space of a mounted volume for deleted files')
parser.add_argument('--vshadow', action='store_true', default=False,
help='automatically mount volume shadow copies')
# Specify options to the subsystem
parser.add_argument('-md', '--mountdir', default=None,
help='specify other directory for volume mountpoints')
parser.add_argument('-p', '--pretty', action='store_true', default=False,
help='use pretty names for mount points; useful in combination with --mountdir')
parser.add_argument('-cn', '--casename', default=None,
help='name to add to the --mountdir, often used in conjunction with --pretty')
parser.add_argument('-rw', '--read-write', action='store_true', default=False,
help='mount image read-write by creating a local write-cache file in a temp directory; '
'implies --disk-mounter=xmount')
parser.add_argument('-m', '--disk-mounter', choices=DISK_MOUNTERS,
default='auto',
help='use other tool to mount the initial images; results may vary between methods and if '
'something doesn\'t work, try another method; dummy can be used when base should not be '
'mounted (default: auto)')
parser.add_argument('-d', '--volume-detector', choices=['pytsk3', 'mmls', 'parted', 'auto'], default='auto',
help='use other volume detection method; pytsk3 and mmls should provide identical results, '
'though pytsk3 is using the direct C API of mmls, but requires pytsk3 to be installed; '
'auto distinguishes between pytsk3 and mmls only '
'(default: auto)')
parser.add_argument('--vstypes', action=AppendDictAction, default={'*': 'detect'},
help='specify type of volume system (partition table); if you don\'t know, '
'use "detect" to try to detect (default: detect)')
parser.add_argument('--fstypes', action=AppendDictAction, default={'?': 'unknown'},
help="allows the specification of the file system type per volume number; format: 0.1=lvm,...; "
"use volume number ? for all undetected file system types and * for all file systems; "
"accepted file systems types are {}".format(", ".join(FILE_SYSTEM_TYPES)) +
", and none only for the ? volume (defaults to unknown)")
parser.add_argument('--keys', action=AppendDictAction, default={},
help="allows the specification of key material per volume number; format: 0.1=p:pass,...; "
"exact format depends on volume type", allow_commas=False)
parser.add_argument('--lazy-unmount', action='store_true', default=False,
help="enables lazily unmounting volumes and disks if direct unmounting fails")
# Toggles for default settings you may perhaps want to override
toggroup = parser.add_argument_group('toggles')
toggroup.add_argument('--single', action='store_true', default=False,
help="do not try to find a volume system, but assume the image contains a single volume")
toggroup.add_argument('--no-single', action='store_true', default=False,
help="prevent trying to mount the image as a single volume if no volume system was found")
args = parser.parse_args()
col = get_coloring_func(color=args.color, no_color=args.color)
# Set logging level for internal Python
handler = ImageMounterStreamHandler(col, args.verbose)
logger = logging.getLogger("imagemounter")
logger.setLevel({0: logging.CRITICAL, 1: logging.WARNING, 2: logging.INFO}.get(args.verbose, logging.DEBUG))
logger.addHandler(handler)
# Check some prerequisites
if os.geteuid(): # Not run as root
print(col('[!] Not running as root!', 'yellow'))
if 'a' in __version__ or 'b' in __version__:
print(col("Development release v{0}. Please report any bugs you encounter.".format(__version__),
attrs=['dark']))
print(col("Bug reports: use -vvvv to get maximum verbosity and include imount --check output in your report",
attrs=['dark']))
print(col("Critical bug? Use git tag to list all versions and use git checkout <version>", attrs=['dark']))
# Make args.single default to None
if args.single == args.no_single:
args.single = None
elif args.single:
args.single = True
elif args.no_single:
args.single = False
# If --no-interaction is specified, imply --keep and not --wait
if args.no_interaction:
args.keep = True
if args.wait:
print(col("[!] --no-interaction can't be used in conjunction with --wait", 'yellow'))
args.wait = False
# Check if mount method supports rw
if args.disk_mounter not in ('xmount', 'auto') and args.read_write:
print(col("[!] {0} does not support mounting read-write! Will mount read-only.".format(args.disk_mounter), 'yellow'))
args.read_write = False
# Check if mount method is available
mount_command = 'avfsd' if args.disk_mounter == 'avfs' else args.disk_mounter
if args.disk_mounter not in ('auto', 'dummy') and not _util.command_exists(mount_command):
print(col("[-] {0} is not installed!".format(args.disk_mounter), 'red'))
sys.exit(1)
elif args.disk_mounter == 'auto' and not any(map(_util.command_exists, ('xmount', 'affuse', 'ewfmount', 'vmware-mount',
'avfsd'))):
print(col("[-] No tools installed to mount the image base! Please install xmount, affuse (afflib-tools), "
"ewfmount (ewf-tools), vmware-mount or avfs first.", 'red'))
sys.exit(1)
# Check if detection method is available
if args.volume_detector == 'pytsk3' and not _util.module_exists('pytsk3'):
print(col("[-] pytsk3 module does not exist!", 'red'))
sys.exit(1)
elif args.volume_detector in ('mmls', 'parted') and not _util.command_exists(args.volume_detector):
print(col("[-] {0} is not installed!".format(args.volume_detector), 'red'))
sys.exit(1)
elif args.volume_detector == 'auto' and not any((_util.module_exists('pytsk3'), _util.command_exists('mmls'),
_util.command_exists('parted'))):
print(col("[-] No tools installed to detect volumes! Please install mmls (sleuthkit), pytsk3 or parted first.",
'red'))
sys.exit(1)
if args.fstypes:
for k, v in args.fstypes.items():
if v.strip() not in FILE_SYSTEM_TYPES and v.strip() not in VOLUME_SYSTEM_TYPES \
and not (k == '?' and v.strip().lower() == 'none'):
print("[!] Error while parsing --fstypes: {} is invalid".format(v))
sys.exit(1)
if '*' in args.fstypes:
print("[!] You are forcing the file system type to {0}. This may cause unexpected results."
.format(args.fstypes['*']))
elif '?' in args.fstypes and args.fstypes['?'] not in ('unknown', 'none'):
print("[!] You are using the file system type {0} as fallback. This may cause unexpected results."
.format(args.fstypes['?']))
if args.only_mount:
args.only_mount = args.only_mount.split(',')
if args.skip:
args.skip = args.skip.split(',')
if args.vstypes:
for k, v in args.vstypes.items():
if v.strip() not in VOLUME_SYSTEM_TYPES:
print("[!] Error while parsing --vstypes: {} is invalid".format(v))
sys.exit(1)
if args.carve and not _util.command_exists('photorec'):
print(col("[-] The photorec command (part of testdisk package) is required to carve, but is not "
"installed. Carving will be disabled.", 'yellow'))
args.carve = False
if args.vshadow and not _util.command_exists('vshadowmount'):
print(col("[-] The vhadowmount command is required to mount volume shadow copies, but is not "
"installed. Mounting volume shadow copies will be disabled.", 'yellow'))
args.vshadow = False
if (args.interactive or not args.images) and not args.unmount:
from imagemounter.cli.shell import main
main()
return
if args.unmount:
unmounter = Unmounter(**vars(args))
commands = unmounter.preview_unmount()
if not commands:
print("[+] Nothing to do")
parser.exit()
print("[!] --unmount will rigorously clean anything that looks like a mount or volume group originating "
"from this utility. You may regret using this if you have other mounts or volume groups that are "
"similarly named. The following commands will be executed:")
for c in commands:
print(" {0}".format(c))
try:
input(">>> Press [enter] to continue or ^C to cancel... ")
unmounter.unmount()
except KeyboardInterrupt:
print("\n[-] Aborted.")
sys.exit(0)
# Enumerate over all images in the CLI
images = []
for num, image in enumerate(args.images):
# If is a directory, find a E01 file in the directory
if os.path.isdir(image):
for f in glob.glob(os.path.join(image, '*.[Ee0]01')):
images.append(f)
break
else:
print(col("[-] {0} is a directory not containing a .001 or .E01 file, aborting!".format(image), "red"))
break
continue
elif not os.path.exists(image):
print(col("[-] Image {0} does not exist, aborting!".format(image), "red"))
break
images.append(image)
else:
p = None
try:
p = ImageParser(images, | |
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_reason'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_reason_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': False,
'csv_column_reason': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_reason'], ['Forgot to choose CSV?'])
def test_system_importer_file_csv_config_form_reason_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_reason': '2',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_choice_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': '2',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_reason'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_reason_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_reason': True,
'csv_column_reason': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_reason_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
reason_1 = Reason.objects.get(reason_name='reason_1').reason_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_reason': str(reason_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" recommendation """
def test_system_importer_file_csv_config_form_recommendation_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_recommendation'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_recommendation_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': False,
'csv_column_recommendation': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'], ['Forgot to choose CSV?']
)
def test_system_importer_file_csv_config_form_recommendation_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_column_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_column_recommendation': '2',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_choice_column_and_db(
self,
):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': '2',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_recommendation'],
['Decide between CSV or database or nothing.'],
)
def test_system_importer_file_csv_config_form_recommendation_from_csv(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_recommendation': True,
'csv_column_recommendation': '2',
}
)
# compare
self.assertTrue(form.is_valid())
def test_system_importer_file_csv_config_form_recommendation_from_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
recommendation_1 = Recommendation.objects.get(
recommendation_name='recommendation_1'
).recommendation_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_default_recommendation': str(recommendation_1),
}
)
# compare
self.assertTrue(form.is_valid())
""" serviceprovider """
def test_system_importer_file_csv_config_form_serviceprovider_choice_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': True,
'csv_column_serviceprovider': None,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['csv_choice_serviceprovider'], ['Add CSV column.'])
def test_system_importer_file_csv_config_form_serviceprovider_column_only(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
systemstatus_1 = Systemstatus.objects.get(
systemstatus_name='systemstatus_1'
).systemstatus_id
# get form
form = SystemImporterFileCsvConfigForm(
data={
'csv_column_system': '1',
'csv_import_path': '/tmp',
'csv_import_filename': 'systems.csv',
'csv_import_username': str(testuser),
'csv_default_systemstatus': str(systemstatus_1),
'csv_default_analysisstatus': str(analysisstatus_1),
'csv_default_tagfree_systemstatus': str(systemstatus_1),
'csv_default_tagfree_analysisstatus': str(analysisstatus_1),
'csv_tag_lock_systemstatus': 'LOCK_SYSTEMSTATUS',
'csv_tag_lock_analysisstatus': 'LOCK_ANALYSISSTATUS',
'csv_remove_tag': 'tag_remove_prefix',
'csv_field_delimiter': 'field_comma',
'csv_text_quote': 'text_double_quotation_marks',
'csv_ip_delimiter': 'ip_semicolon',
'csv_tag_delimiter': 'tag_space',
'csv_choice_serviceprovider': False,
'csv_column_serviceprovider': '2',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['csv_choice_serviceprovider'], ['Forgot to choose CSV?']
)
def test_system_importer_file_csv_config_form_serviceprovider_choice_and_db(self):
"""test field validation"""
# get user
testuser = User.objects.get(
username='testuser_system_importer_file_csv_config'
).id
# get objects
analysisstatus_1 = Analysisstatus.objects.get(
analysisstatus_name='analysisstatus_1'
).analysisstatus_id
serviceprovider_1 = Serviceprovider.objects.get(
serviceprovider_name='serviceprovider_1'
).serviceprovider_id
| |
<filename>DragandDrop-imageold.py
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 14:24:31 2017
@author: guarind
"""
import cv2
import os
import sys
import numpy as np
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5.QtCore import QFile, QTextStream
from mini_Emotrics import Emotrics
from settings_window import ShowSettings
class PatientPhotograph(object):
    """Container for all data associated with one patient photograph.

    Holds the OpenCV image, file metadata and the facial-analysis results
    (dlib landmarks, iris positions, bounding box) for a single photo.
    """

    def __init__(self):
        # image data and file metadata
        self._photo = None        # OpenCV image element
        self._file_name = None    # physical address (path) of the photo
        self._name = ''           # base file name
        self._extension = ''      # file extension
        self._ID = ''             # photo ID; there are eight photo types
        # facial-analysis results
        self._shape = None        # landmark localization provided by dlib
        self._lefteye = None      # position and diameter of the left iris
        self._righteye = None     # position and diameter of the right iris
        self._points = None
        self._boundingbox = None  # facial bounding box provided by dlib
"""
This class is in charge of drawing the picture and the landmarks in the main
window, it also takes care of lifting and re-location of landmarks.
"""
class ThumbNailViewer(QtWidgets.QGraphicsView):
    """Thumbnail widget that displays a photo and accepts image drag-and-drop.

    Shows a placeholder image until the user drops a supported image file on
    it.  Double-clicking a loaded image emits ``dropped`` with a populated
    ``PatientPhotograph`` so the owner window can process the photo.
    """

    # emitted on double-click; payload is a PatientPhotograph instance
    dropped = QtCore.pyqtSignal(object)

    def __init__(self, parent=None):
        # usual parameters to make sure the image can be zoomed in and out and
        # it is possible to move around the zoomed-in view
        super(ThumbNailViewer, self).__init__(parent)
        self._scene = QtWidgets.QGraphicsScene(self)
        self._photo = QtWidgets.QGraphicsPixmapItem()
        self._scene.addItem(self._photo)
        self.setScene(self._scene)
        self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
        self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(227,227,227)))
        self.setFrameShape(QtWidgets.QFrame.NoFrame)
        #self.setDragMode(QtWidgets.QGraphicsView.RubberBandDrag)
        self.setAcceptDrops(True)
        self._hasImage = False      # True once a user image has been dropped
        self._ImageAddress = None   # normalized path of the dropped image
        # file extensions accepted on drop (both lower- and upper-case)
        self._validextensions = ['.png', '.jpg', '.jpeg', '.tif', '.tiff', '.PNG', '.JPG', '.JPEG', '.TIF', '.TIFF']
        self.setBackground()
        #QtWidgets.QGraphicsView.RubberBandDrag
        self.WidgetName = None      # photo-type ID assigned by the owner window

    def setBackground(self):
        """Load the 'drop here' placeholder image shipped next to the script."""
        scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
        pixmap = QtGui.QPixmap(scriptDir + os.path.sep + 'include' + os.path.sep + 'drophere.jpg')
        self.setPhoto(pixmap)

    def setPhoto(self, pixmap = None):
        """Put *pixmap* in the scene (when valid) and fit it to the view.

        When pixmap is None or null the current pixmap is cleared instead.
        """
        if pixmap and not pixmap.isNull():
            #self.setDragMode(QtWidgets.QGraphicsView.RubberBandDrag)
            self._photo.setPixmap(pixmap)
            self.fitInView()
        else:
            self.setDragMode(QtWidgets.QGraphicsView.NoDrag)
            self._photo.setPixmap(QtGui.QPixmap())

    def fitInView(self):
        """Scale the view so the current pixmap fills it, preserving aspect.

        NOTE(review): this overrides QGraphicsView.fitInView with a different
        (argument-less) signature — intentional here, but worth confirming no
        caller relies on the base-class overloads.
        """
        rect = QtCore.QRectF(self._photo.pixmap().rect())
        self.setSceneRect(rect)
        if not rect.isNull():
            # reset the current scale to 1:1 before computing the fit factor
            unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))
            self.scale(1 / unity.width(), 1 / unity.height())
            viewrect = self.viewport().rect()
            scenerect = self.transform().mapRect(rect)
            # uniform scale: the limiting dimension keeps the h/w ratio
            factor = min(viewrect.width() / scenerect.width(),
                         viewrect.height() / scenerect.height())
            self.scale(factor, factor)
            self.centerOn(rect.center())

    def resizeEvent(self, event):
        # make sure that when the widget is resized the image is also resized,
        # preserving the h/w ratio
        self.fitInView()

    def dragEnterEvent(self, event):
        """Accept the drag only if it carries URLs (file paths)."""
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        """Accept the move only for files with a supported image extension."""
        if event.mimeData().hasUrls():
            # NOTE(review): with several URLs the last one decides
            # accept/ignore — presumably single-file drops are expected
            for url in event.mimeData().urls():
                local_address = str(url.toLocalFile())
                file_name,extension = os.path.splitext(local_address)
                if extension in self._validextensions:
                    event.setDropAction(QtCore.Qt.CopyAction)
                    event.accept()
                else:
                    event.ignore()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Load a dropped image file into the viewer and remember its path."""
        if event.mimeData().hasUrls():
            # NOTE(review): with several valid URLs the last one loaded wins
            for url in event.mimeData().urls():
                local_address = str(url.toLocalFile())
                file_name,extension = os.path.splitext(local_address)
                if extension in self._validextensions:
                    event.setDropAction(QtCore.Qt.CopyAction)
                    event.accept()
                    pixmap = QtGui.QPixmap(local_address)
                    self.setPhoto(pixmap)
                    self._hasImage = True #indicate that there is an image
                    self._ImageAddress = os.path.normpath(local_address) #store the image address in a class variable
                else:
                    event.ignore()
        else:
            event.ignore()

    def mouseDoubleClickEvent(self, event):
        """On double-click, package the loaded image and emit ``dropped``."""
        if self._hasImage :
            InfoPhotograph = PatientPhotograph()
            InfoPhotograph._photo = cv2.imread(self._ImageAddress)
            InfoPhotograph._file_name = self._ImageAddress
            #split the file name from its extension
            file_name,extension = os.path.splitext(InfoPhotograph._file_name)
            delimiter = os.path.sep
            name=file_name.split(delimiter)
            #
            InfoPhotograph._name = name = name[-1] #keep only the last portion (the rest is the physical address of the file)
            InfoPhotograph._extension = extension[1:]
            InfoPhotograph._ID = self.WidgetName
            self.dropped.emit(InfoPhotograph)
class window(QtWidgets.QWidget):
def __init__(self):
super(window, self).__init__()
self.setWindowTitle('auto-eFACE')
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
self.setWindowIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'meei_3WR_icon.ico'))
#self.setStyleSheet('background:Aliceblue')
self._CalibrationType = 'Iris' #_CalibrationType can be 'Iris' or 'Manual'
self._CalibrationValue = 11.77 #calibration parameter
self._ModelName = 'iBUG' #_ModelType can be 'iBUGS' or 'MEE'
#Each photogaph will have its own class that stores all the relevant
#information, this class will be updated everytime the user double
#clicks on a Thumbnail viewer element. The report card will be generated
#with the information stored in these elements, so they all have to
#be filled before generating the report card
self._Rest = PatientPhotograph()
self._SmallSmile = PatientPhotograph()
self._LargeSmile = PatientPhotograph()
self._EyeBrow = PatientPhotograph()
self._EyeClosureGently = PatientPhotograph()
self._EyeClosureTight = PatientPhotograph()
self._PuckeringLips = PatientPhotograph()
self._DentalShow = PatientPhotograph()
#initialize the User Interface
self.initUI()
def initUI(self):
scriptDir = os.path.dirname(os.path.realpath(sys.argv[0]))
spacerh = QtWidgets.QWidget(self)
spacerh.setFixedSize(15,0)
spacerv = QtWidgets.QWidget(self)
spacerv.setFixedSize(0,15)
#the image will be displayed in the custom ImageViewer
self.Rest = ThumbNailViewer()
self.Rest.WidgetName = "Rest"
self.Rest.dropped.connect(self.pictureDropped)
self.Rest.setMinimumWidth(100)
self.Rest.setMinimumHeight(150)
RestBox = QtWidgets.QGroupBox('Rest')
RestBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
RestLayout = QtWidgets.QGridLayout()
RestLayout.addWidget(self.Rest,0,0,1,1)
RestBox.setLayout(RestLayout)
self.SmallSmile = ThumbNailViewer()
self.SmallSmile.WidgetName = "SmallSmile"
self.SmallSmile.dropped.connect(self.pictureDropped)
self.SmallSmile.setMinimumWidth(100)
self.SmallSmile.setMinimumHeight(150)
SmallSmileBox = QtWidgets.QGroupBox('Best Smile')
SmallSmileBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
SmallSmileLayout = QtWidgets.QGridLayout()
SmallSmileLayout.addWidget(self.SmallSmile,0,0,1,1)
SmallSmileBox.setLayout(SmallSmileLayout)
self.LargeSmile = ThumbNailViewer()
self.LargeSmile.WidgetName = "LargeSmile"
self.LargeSmile.dropped.connect(self.pictureDropped)
self.LargeSmile.setMinimumWidth(100)
self.LargeSmile.setMinimumHeight(150)
LargeSmileBox = QtWidgets.QGroupBox('Biggest Smile')
LargeSmileBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
LargeSmileLayout = QtWidgets.QGridLayout()
LargeSmileLayout.addWidget(self.LargeSmile,0,0,1,1)
LargeSmileBox.setLayout(LargeSmileLayout)
self.EyeBrow = ThumbNailViewer()
self.EyeBrow.WidgetName = "EyeBrow"
self.EyeBrow.dropped.connect(self.pictureDropped)
self.EyeBrow.setMinimumWidth(100)
self.EyeBrow.setMinimumHeight(150)
EyeBrowBox = QtWidgets.QGroupBox('Brow Elevation')
EyeBrowBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
EyeBrowLayout = QtWidgets.QGridLayout()
EyeBrowLayout.addWidget(self.EyeBrow,0,0,1,1)
EyeBrowBox.setLayout(EyeBrowLayout)
self.EyeClosureGently = ThumbNailViewer()
self.EyeClosureGently.WidgetName = "EyeClosureGently"
self.EyeClosureGently.dropped.connect(self.pictureDropped)
self.EyeClosureGently.setMinimumWidth(100)
self.EyeClosureGently.setMinimumHeight(150)
EyeClosureGentlyBox = QtWidgets.QGroupBox('Gentle Eye Closure')
# EyeClosureGentlyBox.setMinimumWidth(100)
# EyeClosureGentlyBox.setMinimumHeight(150)
EyeClosureGentlyBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
EyeClosureGentlyLayout = QtWidgets.QGridLayout()
EyeClosureGentlyLayout.addWidget(self.EyeClosureGently,0,0,1,1)
EyeClosureGentlyBox.setLayout(EyeClosureGentlyLayout)
self.EyeClosureTight = ThumbNailViewer()
self.EyeClosureTight.WidgetName = "EyeClosureTight"
self.EyeClosureTight.dropped.connect(self.pictureDropped)
self.EyeClosureTight.setMinimumWidth(100)
self.EyeClosureTight.setMinimumHeight(150)
EyeClosureTightBox = QtWidgets.QGroupBox('Tight Eye Closure')
EyeClosureTightBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
EyeClosureTightLayout = QtWidgets.QGridLayout()
EyeClosureTightLayout.addWidget(self.EyeClosureTight,0,0,1,1)
EyeClosureTightBox.setLayout(EyeClosureTightLayout)
self.PuckeringLips = ThumbNailViewer()
self.PuckeringLips.WidgetName = "PuckeringLips"
self.PuckeringLips.dropped.connect(self.pictureDropped)
self.PuckeringLips.setMinimumWidth(100)
self.PuckeringLips.setMinimumHeight(150)
PuckeringLipsBox = QtWidgets.QGroupBox('Pucker Lips')
PuckeringLipsBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
PuckeringLipsLayout = QtWidgets.QGridLayout()
PuckeringLipsLayout.addWidget(self.PuckeringLips,0,0,1,1)
PuckeringLipsBox.setLayout(PuckeringLipsLayout)
self.DentalShow = ThumbNailViewer()
self.DentalShow.WidgetName = "DentalShow"
self.DentalShow.dropped.connect(self.pictureDropped)
self.DentalShow.setMinimumWidth(100)
self.DentalShow.setMinimumHeight(150)
DentalShowBox = QtWidgets.QGroupBox('Show Theet')
DentalShowBox.setStyleSheet(self.getStyleSheet(scriptDir + os.path.sep + 'include' + os.path.sep + 'GroupBoxStyle.qss'))
DentalShowLayout = QtWidgets.QGridLayout()
DentalShowLayout.addWidget(self.DentalShow,0,0,1,1)
DentalShowBox.setLayout(DentalShowLayout)
#the main window consist of the toolbar and the ImageViewer
#layout = QtWidgets.QVBoxLayout()
layout = QtWidgets.QGridLayout()
#layout.addWidget(self.Rest_title,0,0,1,1)
#layout.addWidget(self.Rest,1,0,1,2)
layout.addWidget(RestBox,0,0,1,1)
layout.addWidget(spacerv,0,1,2,1)
layout.addWidget(EyeBrowBox,0,2,1,1)
layout.addWidget(spacerv,0,3,2,1)
layout.addWidget(EyeClosureGentlyBox,0,4,1,1)
layout.addWidget(spacerv,0,5,2,1)
layout.addWidget(EyeClosureTightBox ,0,6,1,1)
layout.addWidget(spacerh,1,0,1,6)
layout.addWidget(SmallSmileBox,2,0,1,1)
layout.addWidget(LargeSmileBox,2,2,1,1)
layout.addWidget(PuckeringLipsBox,2,4,1,1)
layout.addWidget(DentalShowBox,2,6,1,1)
#toolbar
loadAction = QtWidgets.QAction('Load images from folder', self)
loadAction.setIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'load_icon2.png'))
#loadAction.triggered.connect(self.load_file)
saveAction = QtWidgets.QAction('Save results', self)
saveAction.setIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'save_icon2.png'))
#saveAction.triggered.connect(self.save_results)
settingsAction = QtWidgets.QAction('Change settings', self)
settingsAction.setIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'settings-icon2.png'))
settingsAction.triggered.connect(self.settings)
ReportAction = QtWidgets.QAction('Generate report', self)
ReportAction.setIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'report_card.png'))
#ReportAction.triggered.connect(self.report_card)
exitAction = QtWidgets.QAction('Exit', self)
exitAction.setIcon(QtGui.QIcon(scriptDir + os.path.sep + 'include' +os.path.sep +'icon_color'+ os.path.sep + 'exit_icon2.png'))
exitAction.triggered.connect(self.close_app)
#create the toolbar and add the actions
self.toolBar = QtWidgets.QToolBar(self)
self.toolBar.addActions((loadAction, ReportAction, settingsAction, saveAction, exitAction))
#set the size of each icon to 50x50
self.toolBar.setIconSize(QtCore.QSize(50,50))
for action in self.toolBar.actions():
widget = self.toolBar.widgetForAction(action)
widget.setFixedSize(50, 50)
self.toolBar.setMinimumSize(self.toolBar.sizeHint())
self.toolBar.setStyleSheet('QToolBar{spacing:5px;}')
LargeLayout = QtWidgets.QVBoxLayout(self)
LargeLayout.addWidget(self.toolBar)
LargeLayout.addLayout(layout)
self.setLayout(LargeLayout)
self.show()
self.move(QtWidgets.QApplication.desktop().screen().rect().center()- self.rect().center())
#this function read the style sheet used to presents the GroupBox,
#it is located in .\include\GroupBoxStyle.qss
def getStyleSheet(self, path):
    """Read a Qt style sheet (.qss) file and return its contents as a string.

    Used to skin the QGroupBox widgets; the .qss files live under ./include.
    """
    qss_file = QFile(path)
    qss_file.open(QFile.ReadOnly | QFile.Text)
    contents = QTextStream(qss_file).readAll()
    qss_file.close()
    return contents
def pictureDropped(self, photograph):
    """Open the dropped photograph in the modal Emotrics analysis dialog.

    Slot for the ThumbNailViewer 'dropped' signal. Launches the analysis
    window using this widget's current calibration type/value and landmark
    model name.

    NOTE(review): a large block of commented-out code that copied analysis
    results back into per-pose photograph objects (Rest, SmallSmile,
    LargeSmile, ...) was removed here as dead code; recover it from version
    control if that synchronization is ever reinstated.
    """
    show_me = Emotrics(photograph._file_name, self._CalibrationType, self._CalibrationValue, self._ModelName)
    show_me.exec_()
import os
import torch.multiprocessing
# Share tensors between DataLoader workers via the file system instead of
# file descriptors (avoids fd exhaustion with many workers).
torch.multiprocessing.set_sharing_strategy('file_system')
import torch.backends.cudnn as cudnn
# Disable cuDNN autotuning; kernel selection stays fixed across runs.
cudnn.benchmark = False
# Restrict the process to GPU 0; must be set before CUDA is initialised.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys
import os
import json
import time
import argparse
import random
from decimal import Decimal
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from graph4nlp.pytorch.data.data import *
from conll import ConllDataset
from graph4nlp.pytorch.modules.graph_construction import *
from dependency_graph_construction_without_tokenize import DependencyBasedGraphConstruction_without_tokenizer
from line_graph_construction import LineBasedGraphConstruction
from graph4nlp.pytorch.modules.graph_construction.node_embedding_based_graph_construction import *
from graph4nlp.pytorch.modules.graph_construction.node_embedding_based_refined_graph_construction import *
from graph4nlp.pytorch.modules.utils.generic_utils import to_cuda
from graph4nlp.pytorch.modules.graph_construction.embedding_construction import WordEmbedding
from graph4nlp.pytorch.modules.graph_embedding.graphsage import GraphSAGE
from graph4nlp.pytorch.modules.graph_embedding.gat import GAT
from graph4nlp.pytorch.modules.graph_embedding.ggnn import GGNN
from graph4nlp.pytorch.modules.graph_embedding.gcn import GCN
from graph4nlp.pytorch.modules.utils.vocab_utils import Vocab
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.optim as optim
import dgl
#from torchcrf import CRF
from graph4nlp.pytorch.modules.evaluation.base import EvaluationMetricBase
from graph4nlp.pytorch.modules.prediction.classification.node_classification.FeedForwardNN import FeedForwardNN
from graph4nlp.pytorch.modules.prediction.classification.node_classification.BiLSTMFeedForwardNN import BiLSTMFeedForwardNN
from graph4nlp.pytorch.modules.loss.general_loss import GeneralLoss
from graph4nlp.pytorch.modules.evaluation.accuracy import Accuracy
from conlleval import evaluate
def all_to_cuda(data, device=None):
    """Move a tensor, or every tensor inside a list/dict, to `device`.

    Non-tensor values (and containers without tensors) pass through
    unchanged; list/dict containers are modified in place.
    """
    if isinstance(data, torch.Tensor):
        return to_cuda(data, device)
    if isinstance(data, (list, dict)):
        indices = range(len(data)) if isinstance(data, list) else data.keys()
        for idx in indices:
            if isinstance(data[idx], torch.Tensor):
                data[idx] = to_cuda(data[idx], device)
    return data
def conll_score(preds, tgts, tag_types):
    """Compute CoNLL precision/recall/F1 from predicted and gold tag ids.

    `preds` is a flat list of per-token scalar tensors; `tgts` is a list of
    per-sentence tag tensors that gets flattened here. Both are mapped
    through `tag_types` and scored with conlleval's `evaluate`.
    """
    pred_scalars = [p.cpu().clone().numpy() for p in preds]
    tgt_scalars = []
    for sent in tgts:
        tgt_scalars.extend(sent.cpu().clone().numpy().tolist())
    pred_tags = [tag_types[int(v)] for v in pred_scalars]
    tgt_tags = [tag_types[int(v)] for v in tgt_scalars]
    prec, rec, f1 = evaluate(tgt_tags, pred_tags, verbose=False)
    return prec, rec, f1
def logits2tag(logits):
    """Return the index of the highest-scoring class along the last dim."""
    return torch.max(logits, dim=-1)[1]
def write_file(tokens_collect, pred_collect, tag_collect, file_name, tag_types):
    """Write gold vs. predicted tags, one token per line: "<idx> <token> <gold> <pred>".

    :param tokens_collect: list of sentences, each a list of token strings.
    :param pred_collect: list of per-sentence predicted tag-id tensors.
    :param tag_collect: list of per-sentence gold tag-id tensors.
    :param file_name: output path; overwritten if it exists.
    :param tag_types: tag vocabulary; tag ids index into this list.
    """
    num_sent = len(tokens_collect)
    # 'with' guarantees the handle is closed even if a lookup below raises
    # (the original leaked the handle on any exception).
    with open(file_name, 'w') as f:
        for idx in range(num_sent):
            sent_pred = pred_collect[idx].cpu().clone().numpy()
            sent_tag = tag_collect[idx].cpu().clone().numpy()
            for word_idx, w in enumerate(tokens_collect[idx]):
                tgt = tag_types[sent_tag[word_idx].item()]
                pred = tag_types[sent_pred[word_idx].item()]
                f.write('%d %s %s %s\n' % (word_idx + 1, w, tgt, pred))
def get_tokens(g_list):
    """Extract per-graph token lists, dropping the first 'ROOT' token if present.

    Each graph exposes `node_attributes`, an iterable of per-node dicts with
    a 'token' entry.
    """
    all_tokens = []
    for graph in g_list:
        sent = [attrs['token'] for attrs in graph.node_attributes]
        if 'ROOT' in sent:
            sent.remove('ROOT')
        all_tokens.append(sent)
    return all_tokens
class SentenceBiLSTMCRF(nn.Module):
    """BiLSTM prediction head: refines node logits and scores them with NLL loss.

    Despite the name, the CRF layer is not present; the head is a
    BiLSTMFeedForwardNN followed by a two-layer MLP and log-softmax.
    Hyper-parameters come from the module-level ``args`` namespace.
    """

    def __init__(self, device=None, use_rnn=False):
        super(SentenceBiLSTMCRF, self).__init__()
        self.use_rnn = use_rnn
        # Layer creation order is kept stable so parameter registration and
        # random initialisation match the original implementation.
        self.prediction = BiLSTMFeedForwardNN(args.init_hidden_size * 1, args.init_hidden_size * 1).to(device)
        self.linear1 = nn.Linear(int(args.init_hidden_size * 1), args.hidden_size)
        self.linear1_ = nn.Linear(int(args.hidden_size * 1), args.num_class)
        self.dropout_tag = nn.Dropout(args.tag_dropout)
        self.dropout_rnn_out = nn.Dropout(p=args.rnn_dropout)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self.nll_loss = nn.NLLLoss()

    def forward(self, batch_graph, tgt_tags):
        """Return (loss, predicted tag ids) for the batched graph."""
        batch_graph = self.prediction(batch_graph)
        emb = batch_graph.node_features['logits']
        hidden = self.linear1(self.dropout_rnn_out(emb))
        batch_graph.node_features['logits'] = self.linear1_(self.dropout_tag(F.elu(hidden)))
        # Shape: [total_nodes_in_batch, num_class].
        logits = batch_graph.node_features['logits'][:, :]
        tgt = torch.cat(tgt_tags)
        loss = self.nll_loss(self.logsoftmax(logits), tgt)
        pred_tags = logits2tag(logits)
        return loss, pred_tags
class Word2tag(nn.Module):
    """End-to-end NER model: graph construction -> optional GNN -> BiLSTM tagger.

    The graph-construction module is chosen by ``args.graph_type``
    ('line_graph', 'dependency_graph', 'node_emb', 'node_emb_refined') and
    the encoder by ``args.gnn_type`` ('graphsage', 'ggnn', 'gat', 'gcn').
    Relies on the module-level ``args`` namespace being populated before
    instantiation.
    """

    def __init__(self, vocab, device=None):
        super(Word2tag, self).__init__()
        self.vocab = vocab
        self.device = device
        # 'ie' graphs have multi-token nodes; every other graph type maps
        # one token to one node.
        embedding_style = {'single_token_item': True if args.graph_type != 'ie' else False,
                           'emb_strategy': 'w2v_bilstm',
                           'num_rnn_layers': 1,
                           'bert_model_name': 'bert-base-uncased',
                           'bert_lower_case': True
                           }
        # NOTE(review): use_edge_weight is set below but never read in this
        # class's visible code — confirm whether it should be passed on.
        use_edge_weight = False
        if args.graph_type == 'line_graph':
            # GGNN requires equal input/output sizes, so halve the hidden
            # size here (bidirectional variants concatenate back to full).
            if args.gnn_type == 'ggnn':
                self.graph_topology = LineBasedGraphConstruction(embedding_style=embedding_style,
                                                                 vocab=vocab.in_word_vocab,
                                                                 hidden_size=int(args.init_hidden_size / 2),
                                                                 rnn_dropout=None,
                                                                 word_dropout=args.word_dropout,
                                                                 device=self.device,
                                                                 fix_word_emb=not args.no_fix_word_emb,
                                                                 fix_bert_emb=not args.no_fix_bert_emb)
            else:
                self.graph_topology = LineBasedGraphConstruction(embedding_style=embedding_style,
                                                                 vocab=vocab.in_word_vocab,
                                                                 hidden_size=args.init_hidden_size,
                                                                 rnn_dropout=None,
                                                                 word_dropout=args.word_dropout,
                                                                 device=self.device,
                                                                 fix_word_emb=not args.no_fix_word_emb,
                                                                 fix_bert_emb=not args.no_fix_bert_emb)
        if args.graph_type == 'dependency_graph':
            if args.gnn_type == 'ggnn':
                self.graph_topology = DependencyBasedGraphConstruction_without_tokenizer(embedding_style=embedding_style,
                                                                                         vocab=vocab.in_word_vocab,
                                                                                         hidden_size=int(args.init_hidden_size / 2),
                                                                                         rnn_dropout=None,
                                                                                         word_dropout=args.word_dropout,
                                                                                         device=self.device,
                                                                                         fix_word_emb=not args.no_fix_word_emb,
                                                                                         fix_bert_emb=not args.no_fix_bert_emb)
            else:
                self.graph_topology = DependencyBasedGraphConstruction_without_tokenizer(embedding_style=embedding_style,
                                                                                         vocab=vocab.in_word_vocab,
                                                                                         hidden_size=args.init_hidden_size,
                                                                                         rnn_dropout=None,
                                                                                         word_dropout=args.word_dropout,
                                                                                         device=self.device,
                                                                                         fix_word_emb=not args.no_fix_word_emb,
                                                                                         fix_bert_emb=not args.no_fix_bert_emb)
        if args.graph_type == 'node_emb':
            # Dynamic graph learned from node embeddings (similarity metric
            # + sparsification controlled by the gl_* hyper-parameters).
            self.graph_topology = NodeEmbeddingBasedGraphConstruction(
                vocab.in_word_vocab,
                embedding_style,
                sim_metric_type=args.gl_metric_type,
                num_heads=args.gl_num_heads,
                top_k_neigh=args.gl_top_k,
                epsilon_neigh=args.gl_epsilon,
                smoothness_ratio=args.gl_smoothness_ratio,
                connectivity_ratio=args.gl_connectivity_ratio,
                sparsity_ratio=args.gl_sparsity_ratio,
                input_size=args.init_hidden_size,
                hidden_size=args.init_hidden_size,
                fix_word_emb=not args.no_fix_word_emb,
                fix_bert_emb=not args.no_fix_bert_emb,
                word_dropout=args.word_dropout,
                rnn_dropout=None,
                device=self.device)
            use_edge_weight = True
        if args.graph_type == 'node_emb_refined':
            # Learned graph blended with an initial static topology via
            # init_adj_alpha.
            self.graph_topology = NodeEmbeddingBasedRefinedGraphConstruction(
                vocab.in_word_vocab,
                embedding_style,
                args.init_adj_alpha,
                sim_metric_type=args.gl_metric_type,
                num_heads=args.gl_num_heads,
                top_k_neigh=args.gl_top_k,
                epsilon_neigh=args.gl_epsilon,
                smoothness_ratio=args.gl_smoothness_ratio,
                connectivity_ratio=args.gl_connectivity_ratio,
                sparsity_ratio=args.gl_sparsity_ratio,
                input_size=args.init_hidden_size,
                hidden_size=args.init_hidden_size,
                fix_word_emb=not args.no_fix_word_emb,
                word_dropout=args.word_dropout,
                rnn_dropout=None,
                device=self.device)
            use_edge_weight = True
        if 'w2v' in self.graph_topology.embedding_layer.word_emb_layers:
            # Reuse the word-embedding table built by the topology module.
            self.word_emb = self.graph_topology.embedding_layer.word_emb_layers['w2v'].word_emb_layer
        else:
            self.word_emb = WordEmbedding(
                self.vocab.in_word_vocab.embeddings.shape[0],
                self.vocab.in_word_vocab.embeddings.shape[1],
                pretrained_word_emb=self.vocab.in_word_vocab.embeddings,
                fix_emb=not args.no_fix_word_emb,
                device=self.device).word_emb_layer
        self.gnn_type = args.gnn_type
        self.use_gnn = args.use_gnn
        self.linear0 = nn.Linear(int(args.init_hidden_size * 1), args.hidden_size).to(self.device)
        self.linear0_ = nn.Linear(int(args.init_hidden_size * 1), args.init_hidden_size).to(self.device)
        self.dropout_tag = nn.Dropout(args.tag_dropout)
        self.dropout_rnn_out = nn.Dropout(p=args.rnn_dropout)
        if self.use_gnn is False:
            # No GNN: feed projected node features straight to the tagger.
            self.bilstmcrf = SentenceBiLSTMCRF(device=self.device, use_rnn=False).to(self.device)
        else:
            # bi_sep keeps forward/backward states separate, so each half
            # gets half the hidden size.
            if self.gnn_type == "graphsage":
                if args.direction_option == 'bi_sep':
                    self.gnn = GraphSAGE(args.gnn_num_layers, args.hidden_size, int(args.init_hidden_size / 2), int(args.init_hidden_size / 2), aggregator_type='mean', direction_option=args.direction_option, activation=F.elu).to(self.device)
                else:
                    self.gnn = GraphSAGE(args.gnn_num_layers, args.hidden_size, args.init_hidden_size, args.init_hidden_size, aggregator_type='mean', direction_option=args.direction_option, activation=F.elu).to(self.device)
                self.bilstmcrf = SentenceBiLSTMCRF(device=self.device, use_rnn=True).to(self.device)
            elif self.gnn_type == "ggnn":
                if args.direction_option == 'bi_sep':
                    self.gnn = GGNN(args.gnn_num_layers, int(args.init_hidden_size / 2), int(args.init_hidden_size / 2), direction_option=args.direction_option, n_etypes=1).to(self.device)
                else:
                    self.gnn = GGNN(args.gnn_num_layers, args.init_hidden_size, args.init_hidden_size, direction_option=args.direction_option, n_etypes=1).to(self.device)
                self.bilstmcrf = SentenceBiLSTMCRF(device=self.device, use_rnn=True).to(self.device)
            elif self.gnn_type == "gat":
                heads = 2
                if args.direction_option == 'bi_sep':
                    self.gnn = GAT(args.gnn_num_layers, args.hidden_size, int(args.init_hidden_size / 2), int(args.init_hidden_size / 2), heads, direction_option=args.direction_option, feat_drop=0.6, attn_drop=0.6, negative_slope=0.2, activation=F.elu).to(self.device)
                else:
                    self.gnn = GAT(args.gnn_num_layers, args.hidden_size, args.init_hidden_size, args.init_hidden_size, heads, direction_option=args.direction_option, feat_drop=0.6, attn_drop=0.6, negative_slope=0.2, activation=F.elu).to(self.device)
                self.bilstmcrf = SentenceBiLSTMCRF(device=self.device, use_rnn=True).to(self.device)
            elif self.gnn_type == "gcn":
                if args.direction_option == 'bi_sep':
                    self.gnn = GCN(args.gnn_num_layers, args.hidden_size, int(args.init_hidden_size / 2), int(args.init_hidden_size / 2), direction_option=args.direction_option, activation=F.elu).to(self.device)
                else:
                    self.gnn = GCN(args.gnn_num_layers, args.hidden_size, args.init_hidden_size, args.init_hidden_size, direction_option=args.direction_option, activation=F.elu).to(self.device)
                self.bilstmcrf = SentenceBiLSTMCRF(device=self.device, use_rnn=True).to(self.device)

    def forward(self, graph, tgt=None, require_loss=True):
        """Build the batch graph, encode, and tag; returns (pred, loss).

        :param graph: batched input graph data.
        :param tgt: list of per-sentence gold tag tensors (required to
            compute the loss).
        :param require_loss: when True the NLL loss is returned as well.
        """
        batch_graph = self.graph_topology(graph)
        if self.use_gnn is False:
            batch_graph.node_features['node_emb'] = batch_graph.node_features['node_feat']
            batch_graph.node_features['node_emb'] = self.dropout_tag(F.elu(self.linear0_(self.dropout_rnn_out(batch_graph.node_features['node_emb']))))
        else:
            # run GNN
            if self.gnn_type == "ggnn":
                # GGNN consumes raw node features (sizes must match).
                batch_graph.node_features['node_feat'] = batch_graph.node_features['node_feat']
            else:
                batch_graph.node_features['node_feat'] = self.dropout_tag(F.elu(self.linear0(self.dropout_rnn_out(batch_graph.node_features['node_feat']))))
            batch_graph = self.gnn(batch_graph)
        # down-task
        loss, pred = self.bilstmcrf(batch_graph, tgt)
        self.loss = loss
        if require_loss == True:
            return pred, self.loss
        else:
            # NOTE(review): the local `loss` is cleared but `self.loss` (the
            # computed loss) is still returned — callers always pass
            # require_loss=True in this file; confirm intent before changing.
            loss = None
            return pred, self.loss
class Conll:
    """Training/evaluation harness for CoNLL-2003 NER with graph4nlp.

    Builds the dataset/dataloaders for the graph type selected by the
    module-level ``args``, the Word2tag model, and an Adam optimizer;
    ``train``/``evaluate``/``test`` drive the usual loops. Checkpoints are
    written under ./checkpoints/.
    """

    def __init__(self):
        super(Conll, self).__init__()
        # IOB tag inventory; list indices double as class ids for the loss.
        self.tag_types = ['I-PER', 'O', 'B-ORG', 'B-LOC', 'I-ORG', 'I-MISC', 'I-LOC', 'B-MISC']
        if args.gpu > -1:
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        self.checkpoint_path = './checkpoints/'
        if not os.path.exists(self.checkpoint_path):
            os.mkdir(self.checkpoint_path)
        self._build_dataloader()
        print("finish dataloading")
        self._build_model()
        print("finish building model")
        self._build_optimizer()
        self._build_evaluation()

    def _build_dataloader(self):
        """Construct the ConllDataset variant matching args.graph_type and wrap it in DataLoaders."""
        print("starting build the dataset")
        if args.graph_type == 'line_graph':
            dataset = ConllDataset(root_dir="examples/pytorch/name_entity_recognition/conll",
                                   topology_builder=LineBasedGraphConstruction,
                                   graph_type='static',
                                   pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                                   topology_subdir='LineGraph',
                                   tag_types=self.tag_types)
        elif args.graph_type == 'dependency_graph':
            dataset = ConllDataset(root_dir="examples/pytorch/name_entity_recognition/conll",
                                   topology_builder=DependencyBasedGraphConstruction_without_tokenizer,
                                   graph_type='static',
                                   pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                                   topology_subdir='DependencyGraph',
                                   tag_types=self.tag_types)
        elif args.graph_type == 'node_emb':
            dataset = ConllDataset(root_dir="examples/pytorch/name_entity_recognition/conll",
                                   topology_builder=NodeEmbeddingBasedGraphConstruction,
                                   graph_type='dynamic',
                                   pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                                   topology_subdir='DynamicGraph_node_emb',
                                   tag_types=self.tag_types,
                                   merge_strategy=None,
                                   dynamic_graph_type=args.graph_type if args.graph_type in ('node_emb', 'node_emb_refined') else None)
        elif args.graph_type == 'node_emb_refined':
            # The refined dynamic graph needs an initial static topology.
            if args.init_graph_type == 'line':
                dynamic_init_topology_builder = LineBasedGraphConstruction
            elif args.init_graph_type == 'dependency':
                dynamic_init_topology_builder = DependencyBasedGraphConstruction_without_tokenizer
            elif args.init_graph_type == 'constituency':
                dynamic_init_topology_builder = ConstituencyBasedGraphConstruction
            elif args.init_graph_type == 'ie':
                merge_strategy = 'global'
                dynamic_init_topology_builder = IEBasedGraphConstruction
            else:
                # init_topology_builder
                raise RuntimeError('Define your own init_topology_builder')
            dataset = ConllDataset(root_dir="examples/pytorch/name_entity_recognition/conll",
                                   topology_builder=NodeEmbeddingBasedRefinedGraphConstruction,
                                   graph_type='dynamic',
                                   pretrained_word_emb_cache_dir=args.pre_word_emb_file,
                                   topology_subdir='DynamicGraph_node_emb_refined',
                                   tag_types=self.tag_types,
                                   dynamic_graph_type=args.graph_type if args.graph_type in ('node_emb', 'node_emb_refined') else None,
                                   dynamic_init_topology_builder=dynamic_init_topology_builder,
                                   dynamic_init_topology_aux_args={'dummy_param': 0})
        print(len(dataset.train))
        print("strating loading the training data")
        self.train_dataloader = DataLoader(dataset.train, batch_size=args.batch_size, shuffle=True,
                                           num_workers=1,
                                           collate_fn=dataset.collate_fn)
        print("strating loading the validating data")
        self.val_dataloader = DataLoader(dataset.val, batch_size=100, shuffle=True,
                                         num_workers=1,
                                         collate_fn=dataset.collate_fn)
        print("strating loading the testing data")
        self.test_dataloader = DataLoader(dataset.test, batch_size=100, shuffle=True,
                                          num_workers=1,
                                          collate_fn=dataset.collate_fn)
        print("strating loading the vocab")
        self.vocab = dataset.vocab_model

    def _build_model(self):
        """Instantiate the Word2tag model on the configured device."""
        self.model = Word2tag(self.vocab, device=self.device).to(self.device)

    def _build_optimizer(self):
        """Adam over trainable parameters, with lr/weight decay from args."""
        parameters = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = optim.Adam(parameters, lr=args.lr, weight_decay=args.weight_decay)

    def _build_evaluation(self):
        """Accuracy metric wrapper (F1/precision/recall)."""
        self.metrics = Accuracy(['F1', 'precision', 'recall'])

    def train(self):
        """Run the training loop; returns (best F1 score, epoch it occurred)."""
        max_score = -1
        max_idx = 0
        for epoch in range(args.epochs):
            self.model.train()
            print("Epoch: {}".format(epoch))
            pred_collect = []
            gt_collect = []
            for data in self.train_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                graph = graph.to(self.device)
                pred_tags, loss = self.model(graph, tgt_l, require_loss=True)
                pred_collect.extend(pred_tags)  # pred: list of batch_sentence pred tensor
                gt_collect.extend(tgt)  # tgt: list of sentence token tensor
                # num_tokens=len(torch.cat(pred_tags).view(-1))
                print("Epoch: {}".format(epoch) + " loss:" + str(loss.cpu().item()))
                self.optimizer.zero_grad()
                loss.backward()
                # Clip gradients to unit norm to stabilise training.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
                self.optimizer.step()
            # Evaluate every epoch; keep the checkpoint with the best val F1.
            if epoch % 1 == 0:
                score = self.evaluate(epoch)
                if score > max_score:
                    torch.save(self.model.state_dict(), self.checkpoint_path + 'checkpoint_best')
                    max_idx = epoch
                max_score = max(max_score, score)
        return max_score, max_idx

    def evaluate(self, epoch):
        """Score the model on the validation split; returns CoNLL F1.

        NOTE(review): assumes val_dataloader yields at least one batch
        (`loss` from the last batch is printed after the loop).
        """
        self.model.eval()
        pred_collect = []
        gt_collect = []
        tokens_collect = []
        with torch.no_grad():
            for data in self.val_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                graph = graph.to(self.device)
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                pred, loss = self.model(graph, tgt_l, require_loss=True)
                pred_collect.extend(pred)  # pred: list of batch_sentence pred tensor
                gt_collect.extend(tgt)  # tgt: list of sentence token tensor
                tokens_collect.extend(get_tokens(from_batch(graph)))
        prec, rec, f1 = conll_score(pred_collect, gt_collect, self.tag_types)
        # NOTE(review): message says "Testing" but this is the validation split.
        print("Testing results: precision is %5.2f, rec is %5.2f, f1 is %5.2f" % (prec, rec, f1))
        print("Epoch: {}".format(epoch) + " loss:" + str(loss.cpu().item()))
        return f1

    def test(self):
        """Load the best checkpoint and score the model on the test split; returns F1."""
        self.model.load_state_dict(torch.load(self.checkpoint_path + 'checkpoint_best'))
        print("sucessfully loaded the existing saved model!")
        self.model.eval()
        pred_collect = []
        tokens_collect = []
        tgt_collect = []
        with torch.no_grad():
            for data in self.test_dataloader:
                graph, tgt = data["graph_data"], data["tgt_tag"]
                graph = graph.to(self.device)
                tgt_l = [tgt_.to(self.device) for tgt_ in tgt]
                pred, loss = self.model(graph, tgt_l, require_loss=True)
                # pred = logits2tag(g)
                pred_collect.extend(pred)
                tgt_collect.extend(tgt)
                tokens_collect.extend(get_tokens(from_batch(graph)))
        prec, rec, f1 = conll_score(pred_collect, tgt_collect, self.tag_types)
        print("Testing results: precision is %5.2f, rec is %5.2f, f1 is %5.2f" % (prec, rec, f1))
        return f1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='NER')
parser.add_argument("--gpu", type=int, default=-1,
help="which GPU to use.")
parser.add_argument("--epochs", type=int, default=150,
help="number of training epochs")
parser.add_argument("--direction_option", type=str, default='bi_fuse',
help="direction type (`undirected`, `bi_fuse`, `bi_sep`)")
parser.add_argument("--lstm_num_layers", type=int, default=1,
help="number of hidden layers in lstm")
parser.add_argument("--gnn_num_layers", type=int, default=1,
help="number of hidden layers in gnn")
parser.add_argument("--init_hidden_size", type=int, default=300,
help="initial_emb_hidden_size")
parser.add_argument("--hidden_size", type=int, default=128,
help="initial_emb_hidden_size")
parser.add_argument("--lstm_hidden_size", type=int, default=80,
help="initial_emb_hidden_size")
parser.add_argument("--num_class", type=int, default=8,
help="num_class")
parser.add_argument("--residual", action="store_true", default=False,
help="use residual connection")
parser.add_argument("--word_dropout", type=float, default=0.5,
help="input feature dropout")
parser.add_argument("--tag_dropout", type=float, default=0.5,
help="input feature dropout")
parser.add_argument("--rnn_dropout", type=list, default=0.33,
help="dropout for rnn in word_emb")
parser.add_argument("--lr", type=float, default=0.01,
help="learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-5,
help="weight decay")
parser.add_argument('--aggregate_type', type=str, default='mean',
help="aggregate type: 'mean','gcn','pool','lstm'")
parser.add_argument('--gnn_type', type=str, default='graphsage',
help="ingnn type: 'gat','graphsage','ggnn'")
parser.add_argument('--use_gnn', type=bool, default=True,
help="whether to use gnn")
parser.add_argument("--batch_size", type=int, default=100,
help="batch size for training")
parser.add_argument("--graph_type", type=str, default="line_graph",
help="graph_type:line_graph, dependency_graph, dynamic_graph")
parser.add_argument("--init_graph_type", | |
# repo: saikrishnawanted/mybot (10-100 stars)
from __future__ import absolute_import
import threading
from time import sleep
import sys
from datetime import datetime
from os.path import getmtime
import atexit
import signal
from bitmex_bot import bitmex, indicators
from bitmex_bot.settings import settings
from bitmex_bot.utils import log, constants, errors
from bitmex_bot.bitmex_historical import Bitmex
from bitmex_bot.bot_trade import BOT_TRADE
# Used for reloading the bot - saves modified times of key files
import os
# Snapshot (path, mtime) pairs so the bot can later detect edits to key files.
watched_files_mtimes = [(f, getmtime(f)) for f in settings.WATCHED_FILES]

#
# Helpers
#

# Module-wide logger configured by the project's log helper.
logger = log.setup_custom_logger('root')
class ExchangeInterface:
    """Thin wrapper around the BitMEX client used by the trading bot.

    Selects the testnet or live base URL from settings.MODE and exposes
    order/position/instrument helpers with bot-friendly signatures.
    """

    def __init__(self, dry_run=False):
        self.dry_run = dry_run
        # Symbol can be overridden by the first command-line argument.
        if len(sys.argv) > 1:
            self.symbol = sys.argv[1]
        else:
            self.symbol = settings.SYMBOL
        url = settings.BASE_URL_TESTING
        # mode in which mode you want to run your bot
        self.mode = settings.MODE
        if self.mode == "LIVE":
            url = settings.BASE_URL_LIVE
        self.bitmex = bitmex.BitMEX(base_url=url, symbol=self.symbol,
                                    apiKey=settings.API_KEY, apiSecret=settings.API_SECRET,
                                    orderIDPrefix=settings.ORDERID_PREFIX)

    def cancel_order(self, order):
        """Cancel a single order dict, retrying until the API call succeeds."""
        tickLog = self.get_instrument()['tickLog']
        logger.info("Canceling: %s %d @ %.*f" % (order['side'], order['orderQty'], tickLog, order['price']))
        while True:
            try:
                self.bitmex.cancel(order['orderID'])
                sleep(settings.API_REST_INTERVAL)
            except ValueError as e:
                # Back off and retry on API errors.
                logger.info(e)
                sleep(settings.API_ERROR_INTERVAL)
            else:
                break

    def cancel_all_orders(self):
        """Log and bulk-cancel every currently open order."""
        logger.info("Resetting current position. Canceling all existing orders.")
        tickLog = self.get_instrument()['tickLog']
        orders_1 = self.bitmex.http_open_orders()
        for order in orders_1:
            logger.info("Canceling: %s %d @ %.*f" % (order['side'], order['orderQty'], tickLog, order['price']))
        if len(orders_1):
            self.bitmex.cancel([order['orderID'] for order in orders_1])
        sleep(settings.API_REST_INTERVAL)

    def get_portfolio(self):
        """Return {symbol: position/instrument summary} for settings.CONTRACTS."""
        contracts = settings.CONTRACTS
        portfolio = {}
        for symbol in contracts:
            position = self.bitmex.position(symbol=symbol)
            instrument = self.bitmex.instrument(symbol=symbol)
            if instrument['isQuanto']:
                future_type = "Quanto"
            elif instrument['isInverse']:
                future_type = "Inverse"
            elif not instrument['isQuanto'] and not instrument['isInverse']:
                future_type = "Linear"
            else:
                raise NotImplementedError("Unknown future type; not quanto or inverse: %s" % instrument['symbol'])
            # Normalise the contract multiplier into settlement-currency terms.
            if instrument['underlyingToSettleMultiplier'] is None:
                multiplier = float(instrument['multiplier']) / float(instrument['quoteToSettleMultiplier'])
            else:
                multiplier = float(instrument['multiplier']) / float(instrument['underlyingToSettleMultiplier'])
            portfolio[symbol] = {
                "currentQty": float(position['currentQty']),
                "futureType": future_type,
                "multiplier": multiplier,
                "markPrice": float(instrument['markPrice']),
                "spot": float(instrument['indicativeSettlePrice']),
            }
        return portfolio

    def get_user_balance(self):
        """Return the account balance as reported by the API."""
        return self.bitmex.user_balance()

    def calc_delta(self):
        """Calculate currency delta for portfolio"""
        portfolio = self.get_portfolio()
        spot_delta = 0
        mark_delta = 0
        # Delta formula depends on the contract type (quanto/inverse/linear).
        for symbol in portfolio:
            item = portfolio[symbol]
            if item['futureType'] == "Quanto":
                spot_delta += item['currentQty'] * item['multiplier'] * item['spot']
                mark_delta += item['currentQty'] * item['multiplier'] * item['markPrice']
            elif item['futureType'] == "Inverse":
                spot_delta += (item['multiplier'] / item['spot']) * item['currentQty']
                mark_delta += (item['multiplier'] / item['markPrice']) * item['currentQty']
            elif item['futureType'] == "Linear":
                spot_delta += item['multiplier'] * item['currentQty']
                mark_delta += item['multiplier'] * item['currentQty']
        basis_delta = mark_delta - spot_delta
        delta = {
            "spot": spot_delta,
            "mark_price": mark_delta,
            "basis": basis_delta
        }
        return delta

    def get_delta(self, symbol=None):
        """Return the current position size for symbol (defaults to own symbol)."""
        if symbol is None:
            symbol = self.symbol
        return self.get_position(symbol)

    def get_instrument(self, symbol=None):
        """Return instrument details for symbol (defaults to own symbol)."""
        if symbol is None:
            symbol = self.symbol
        return self.bitmex.instrument(symbol)

    def get_margin(self):
        """Return the account margin/funds data."""
        return self.bitmex.funds()

    def get_orders(self):
        """Return the list of currently open orders."""
        return self.bitmex.open_orders()

    def set_isolate_margin(self):
        """Switch the symbol's position to isolated margin."""
        self.bitmex.isolate_margin(self.symbol)

    def get_highest_buy(self):
        """Return the open buy order with the highest price (sentinel if none)."""
        buys = [o for o in self.get_orders() if o['side'] == 'Buy']
        if not len(buys):
            return {'price': -2 ** 32}
        highest_buy = max(buys or [], key=lambda o: o['price'])
        return highest_buy if highest_buy else {'price': -2 ** 32}

    def get_lowest_sell(self):
        """Return the open sell order with the lowest price (sentinel if none)."""
        sells = [o for o in self.get_orders() if o['side'] == 'Sell']
        if not len(sells):
            return {'price': 2 ** 32}
        lowest_sell = min(sells or [], key=lambda o: o['price'])
        return lowest_sell if lowest_sell else {'price': 2 ** 32}  # ought to be enough for anyone

    def get_position(self, symbol=None):
        """Return currentQty (signed contracts) for symbol (defaults to own symbol)."""
        if symbol is None:
            symbol = self.symbol
        return self.bitmex.position(symbol)['currentQty']

    def get_ticker(self, symbol=None):
        """Return ticker data for symbol (defaults to own symbol)."""
        if symbol is None:
            symbol = self.symbol
        return self.bitmex.ticker_data(symbol)

    def close_position(self):
        """Close the current position via the API."""
        return self.bitmex.close_position()

    def is_open(self):
        """Check that websockets are still open."""
        return not self.bitmex.ws.exited

    def check_market_open(self):
        """Raise MarketClosedError unless the instrument state is Open/Closed."""
        instrument = self.get_instrument()
        if instrument["state"] != "Open" and instrument["state"] != "Closed":
            raise errors.MarketClosedError("The instrument %s is not open. State: %s" %
                                           (self.symbol, instrument["state"]))

    def check_if_orderbook_empty(self):
        """This function checks whether the order book is empty"""
        instrument = self.get_instrument()
        if instrument['midPrice'] is None:
            raise errors.MarketEmptyError("Orderbook is empty, cannot quote")

    def amend_bulk_orders(self, orders):
        """Amend several orders in one API call."""
        return self.bitmex.amend_bulk_orders(orders)

    def create_bulk_orders(self, orders):
        """Create several orders in one API call."""
        return self.bitmex.create_bulk_orders(orders)

    def cancel_bulk_orders(self, orders):
        """Cancel several orders in one API call."""
        return self.bitmex.cancel([order['orderID'] for order in orders])

    def place_order(self, **kwargs):
        """Place a buy or sell order.

        :param kwargs: must include 'side' ('buy' or 'sell'); the remaining
            keyword arguments are forwarded to the client's buy()/sell().
        :return: the API response from buy()/sell().
        """
        if kwargs['side'] == 'buy':
            kwargs.pop('side')
            return self.bitmex.buy(**kwargs)
        elif kwargs['side'] == 'sell':
            kwargs.pop('side')
            return self.bitmex.sell(**kwargs)
class OrderManager:
    """MACD-signal-driven trading strategy built on ExchangeInterface."""
    # Signal/side string constants used throughout the strategy logic.
    UP = "up"
    DOWN = "down"
    SELL = "sell"
    BUY = "buy"
def __init__(self):
    """Wire up the exchange interface and initialise strategy state.

    Registers exit handlers (atexit and SIGTERM) so cleanup runs on shutdown.
    """
    self.exchange = ExchangeInterface()
    # Once db and orders are created, register exit handler to graceful shutdown.
    atexit.register(self.exit)
    signal.signal(signal.SIGTERM, self.exit)
    self.current_bitmex_price = 0
    logger.info("-------------------------------------------------------------")
    logger.info("Starting Bot......")
    # False until macd_check() produces an "up"/"down" signal.
    self.macd_signal = False
    self.current_ask_price = 0
    self.current_bid_price = 0
    # price at which bot enters first order
    self.sequence = ""
    self.last_price = 0
    # to store current prices for per bot run
    self.initial_order = False
    self.close_order = False
    self.amount = settings.POSITION
    self.is_trade = False
    self.stop_price = 0
    self.profit_price = 0
    self.trade_signal = False
    logger.info("Using symbol %s." % self.exchange.symbol)
def init(self):
    """One-time startup: record starting position, reset orders, set margin mode."""
    if settings.DRY_RUN:
        logger.info("Initializing dry run. Orders printed below represent what would be posted to BitMEX.")
    else:
        logger.info("Order Manager initializing, connecting to BitMEX. Live run: executing real trades.")
    self.start_time = datetime.now()
    self.instrument = self.exchange.get_instrument()
    # Position size at startup; used later to report contracts traded this run.
    self.starting_qty1 = self.exchange.get_delta()
    self.running_qty = self.starting_qty1
    self.reset()
    # set cross margin for the trade
    # NOTE(review): comment says "cross margin" but the call sets ISOLATED
    # margin — confirm which is intended.
    self.exchange.set_isolate_margin()
    # self.place_orders()
def reset(self):
    """Cancel all open orders, re-run the sanity checks, and log status.

    In DRY_RUN mode the process exits here after the status dump.
    """
    self.exchange.cancel_all_orders()
    self.sanity_check()
    self.print_status()
    if settings.DRY_RUN:
        sys.exit()
def print_status(self):
    """Print the current MM status."""
    margin1 = self.exchange.get_margin()
    self.running_qty = self.exchange.get_delta()
    # Margin balance — presumably in satoshis (XBt); confirm against the API.
    self.start_XBt = margin1["marginBalance"]
    # XBt_to_XBT is a project helper (not visible in this file).
    logger.info("Current XBT Balance : %.6f" % XBt_to_XBT(self.start_XBt))
    logger.info("Contracts Traded This Run by BOT: %d" % (self.running_qty - self.starting_qty1))
    logger.info("Total Contract Delta: %.4f XBT" % self.exchange.calc_delta()['spot'])
def macd_check(self):
# print("yes macd")
# as latest price is last one
up_vote = 0
down_vote = 0
data = Bitmex().get_historical_data(tick=settings.TICK_INTERVAL)
if data:
price_list = list(map(lambda i: i['close'], data))
data = indicators.macd(price_list)
status = data[-1]
if status > 0:
up_vote += 1
self.macd_signal = self.UP
elif status < 0:
down_vote += 1
self.macd_signal = self.DOWN
else:
self.macd_signal = False
else:
logger.error("Tick interval not supported")
def get_ticker(self):
ticker = self.exchange.get_ticker()
return ticker
###
# Orders
###
    def place_orders(self, **kwargs):
        """Place one order via the exchange wrapper and return its response.

        All keyword arguments (e.g. side=, orderType=, quantity=, price=,
        stopPx=) are forwarded unchanged to the exchange's place_order.
        """
        return self.exchange.place_order(**kwargs)
###
# Position Limits
###
def short_position_limit_exceeded(self):
"Returns True if the short position limit is exceeded"
if not settings.CHECK_POSITION_LIMITS:
return False
position = self.exchange.get_delta()
return position <= settings.MIN_POSITION
def long_position_limit_exceeded(self):
"Returns True if the long position limit is exceeded"
if not settings.CHECK_POSITION_LIMITS:
return False
position = self.exchange.get_delta()
# print(position)
return position >= settings.MAX_POSITION
def get_exchange_price(self):
data = self.get_ticker()
self.current_bid_price = data['buy']
self.current_ask_price = data['sell']
# price = float(self.current_ask_price+self.current_bid_price)/2
price = data['buy']
# if not (price == self.price_list[-1]):
self.last_price = price
self.macd_check()
###
# Sanity
##
def sanity_check(self):
"""Perform checks before placing orders."""
# Check if OB is empty - if so, can't quote.
self.exchange.check_if_orderbook_empty()
# Ensure market is still open.
self.exchange.check_market_open()
self.get_exchange_price()
# print(self.exchange.get_orders())
logger.info("current BITMEX price is {}".format(self.last_price))
# self.get_exchange_price()
logger.info("Current Price is {} MACD signal {}".format(self.last_price, self.macd_signal))
if not self.is_trade:
if self.macd_signal:
if self.macd_signal == self.UP:
logger.info("Buy Trade Signal {}".format(self.last_price))
logger.info("-----------------------------------------")
self.is_trade = True
self.sequence = self.BUY
if not self.initial_order:
order = self.place_orders(side=self.BUY, orderType='Market', quantity=self.amount)
self.trade_signal = self.macd_signal
self.initial_order = True
if settings.STOP_PROFIT_FACTOR != "":
self.profit_price = order['price'] + (order['price'] * settings.STOP_PROFIT_FACTOR)
if settings.STOP_LOSS_FACTOR != "":
self.stop_price = order['price'] - (order['price'] * settings.STOP_LOSS_FACTOR)
print("Order price {} \tStop Price {} \tProfit Price {} ".
format(order['price'], self.stop_price, self.profit_price))
sleep(settings.API_REST_INTERVAL)
if settings.STOP_LOSS_FACTOR != "":
self.place_orders(side=self.SELL, orderType='StopLimit', quantity=self.amount,
price=int(self.stop_price), stopPx=int(self.stop_price) - 5.0)
sleep(settings.API_REST_INTERVAL)
if settings.STOP_PROFIT_FACTOR != "":
self.place_orders(side=self.SELL, orderType='Limit', quantity=self.amount,
price=int(self.profit_price))
sleep(settings.API_REST_INTERVAL)
self.close_order = True
elif self.macd_signal == self.DOWN:
logger.info("Sell Trade Signal {}".format(self.last_price))
logger.info("-----------------------------------------")
self.is_trade = True
self.sequence = self.SELL
# place order
if not self.initial_order:
order = self.place_orders(side=self.SELL, orderType='Market', quantity=self.amount)
self.trade_signal = self.macd_signal
self.initial_order = True
if settings.STOP_PROFIT_FACTOR != "":
self.profit_price = order['price'] - (order['price'] * settings.STOP_PROFIT_FACTOR)
if settings.STOP_LOSS_FACTOR != "":
self.stop_price = order['price'] + (order['price'] * settings.STOP_LOSS_FACTOR)
print("Order price {} \tStop Price {} \tProfit Price {} ".
format(order['price'], self.stop_price, self.profit_price))
sleep(settings.API_REST_INTERVAL)
if settings.STOP_LOSS_FACTOR != "":
self.place_orders(side=self.BUY, orderType='StopLimit', quantity=self.amount,
price=int(self.stop_price), stopPx=int(self.stop_price) - 5.0)
sleep(settings.API_REST_INTERVAL)
if settings.STOP_PROFIT_FACTOR != "":
self.place_orders(side=self.BUY, orderType='Limit', quantity=self.amount,
price=int(self.profit_price))
sleep(settings.API_REST_INTERVAL)
self.close_order = True
# set cross margin for the trade
else:
if self.macd_signal and self.macd_signal != self.trade_signal and self.trade_signal:
# TODO close all positions on market price immediately and cancel ALL open orders(including stops).
self.exchange.close_position()
# sleep(settings.API_REST_INTERVAL)
self.exchange.cancel_all_orders()
self.is_trade = False
self.close_order = False
self.initial_order = False
self.sequence = ""
self.profit_price = 0
self.stop_price = 0
self.trade_signal = False
sleep(5)
elif self.close_order and self.exchange.get_position() == 0 and len(self.exchange.get_orders()) == 0:
self.is_trade = False
self.close_order = False
self.initial_order = False
self.sequence = ""
self.profit_price = 0
self.stop_price = 0
self.trade_signal = False
else:
data = self.exchange.get_orders()
if len(data) == 1:
if data[0]['ordType'] | |
np.array(wl.dec_hi)
regx = totalmatrix(self.dim, levels, g, h)
regy = sp.csc_matrix((1, self.dim * self.dim))
regx = sp.csc_matrix(regx)
regy = sp.csc_matrix(regy)
self.radonoperator = sp.csc_matrix(self.radonoperator)
alpha = alpha
self.Q.Lx = regx
self.Q.b = 0.0000
self.Q.Ly = regy
self.Q.a = alpha
self.Q.s2 = self.lhsigmsq
if (mapstart):
x0 = np.reshape(self.map_wavelet(alpha, type=type,levels=levels, maxiter=150), (-1, 1))
x0 = x0 + 0.000001 * np.random.rand(self.dim * self.dim, 1)
else:
x0 = 0.0 + 0.01*np.random.randn(self.dim * self.dim, 1)
print("Running MwG MCMC for Besov prior (" + type + ' ' + str(levels) + ').' )
solution,chain= mwgt(M, Madapt, self.Q, x0, sampsigma=1.0, cmonly=retim,thinning=thinning,interstep=interstep,intername=res.intermedfilename)
solution = np.reshape(solution, (-1, 1))
solution = np.reshape(solution, (self.dim, self.dim))
if not retim:
res.finish(result=solution, error=self.difference(solution),chain=chain,thinning=thinning)
return res
else:
return solution
    def target(self):
        """Return the ground-truth target image used for error computation."""
        return self.targetimage
def sinogram(self):
plt.imshow(self.sgram, extent=[self.theta[0], self.theta[-1], -np.sqrt(2), np.sqrt(2)])
plt.show()
    def radonww(self,image, theta_in_angles,circle=False):
        """Radon transform of *image* at the given angles (in degrees).

        Thin wrapper around the imported ``radon`` function (presumably
        skimage.transform.radon — confirm at the import site) that
        suppresses the warnings it emits.
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return radon(image, theta_in_angles, circle)
def difference(self,img):
t = np.ravel(np.reshape(self.targetimage,(1,-1)))
r = np.ravel(np.reshape(img,(1,-1)))
L1 = np.linalg.norm(t-r,ord=1)/np.linalg.norm(t,ord=1)
L2 = np.linalg.norm(t-r,ord=2)/np.linalg.norm(t,ord=2)
return L1,L2
def correlationrow(self,M):
if (len(M.shape) <= 1 or M.shape[0] <= 1):
M = M - np.mean(M)
M = correlate(M, M, mode='full', method='fft')
M = M[int((M.shape[0] - 1) / 2):]
return M / M[0]
else:
M = M - np.mean(M, axis=1, keepdims=True)
M = np.apply_along_axis(lambda x: correlate(x, x, mode='full', method='fft'), axis=1, arr=M)
M = M[:, int((M.shape[1] - 1) / 2):]
return M / np.reshape(M[:, 0], (-1, 1))
def saveresult(self,result):
import h5py
filename = self.globalprefix + result.prefix + ".hdf5"
path = os.path.dirname(os.path.abspath(filename))
if not os.path.exists(path):
os.makedirs(path)
with h5py.File(filename, 'w') as f:
for key, value in result.__dict__.items():
if (value is None):
value = "None"
if (isinstance(value, np.ndarray)):
compression = 'gzip'
value = value.astype(np.float32)
else:
compression = None
f.create_dataset(key, data=value, compression=compression)
f.close()
if __name__ == "__main__":
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
parser = argparse.ArgumentParser()
parser.add_argument('--file-name', default="shepp.png", type=str, help='Image filename. Default=shepp.png')
parser.add_argument('--targetsize', default=128, type=int, help='Input image is scaled to this size. Default=64')
parser.add_argument('--crimefree', default=False, type=bool, help='Simulate sinogram with larger grid and interpolate. Default=False')
parser.add_argument('--meas-noise', default=0.001, type=float, help='Measurement noise. Default=0.015')
parser.add_argument('--itheta', default=180,nargs="+", type=int, help='Range and/or number of radon measurement '
'angles in degrees. One must enter either 3 values (start angle, end angle, number of angles) or just the number of angles, in case the range 0-180 is assumed. Default=50')
parser.add_argument('--globalprefix', default="/results/", type=str, help='Relative prefix to the script itself, if one wants to save the results. Default= /results/')
parser.add_argument('--sampler', default="map", type=str, help='Method to use: hmc, mwg or map. Default= map')
parser.add_argument('--levels', default=None, type=int, help='Number of DWT levels to be used. Default=None means automatic.')
parser.add_argument('--prior', default="tikhonov", type=str,
help='Prior to use: tikhonov, cauchy, tv or wavelet. Default= cauchy')
parser.add_argument('--wave', default="haar", type=str, help='DWT type to use with wavelets. Default=haar')
parser.add_argument('--samples-num', default=200, type=int,
help='Number of samples to be generated within MCMC methods. Default=200, which should be completed within minutes by HMC at small dimensions.')
parser.add_argument('--thinning', default=1, type=int,
help='Thinning factor for MCMC methods. Default=1 is suitable for HMC, MwG might need thinning between 10-500. ')
parser.add_argument('--adapt-num', default=50, type=int, help='Number of adaptations in MCMC. Default=50, which roughly suits for HMC.')
parser.add_argument('--dataload', default=False, action='store_true', help='Use external data. Default=False')
parser.add_argument('--alpha', default=10, type=float,
help='Prior alpha (regulator constant). Default=1.0, which is rather bad for all priors.')
parser.add_argument('--omit', default=False, action='store_true',
help='Omit the command line arguments parsing section in the main.py')
args = parser.parse_args()
if len(sys.argv) > 1 and (args.omit is False) and (args.dataload is False):
t = tomography(filename=args.file_name, targetsize=args.targetsize, itheta=args.itheta, noise=args.meas_noise,crimefree=args.crimefree,commonprefix=args.globalprefix)
real = t.target()
r = None
if args.sampler == "hmc":
if args.prior == "cauchy":
r = t.hmcmc_cauchy(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,thinning=args.thinning)
elif args.prior == "tv":
r = t.hmcmc_tv(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,thinning=args.thinning)
elif args.prior == "wavelet":
r = t.hmcmc_wavelet(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,type=args.wave,levels=args.levels,thinning=args.thinning)
elif args.prior == "tikhonov":
r = t.hmcmc_tikhonov(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,thinning=args.thinning)
elif args.sampler == "mwg":
if args.prior == "cauchy":
r = t.mwg_cauchy(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,thinning=args.thinning)
elif args.prior == "tv":
r = t.mwg_tv(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,thinning=args.thinning)
elif args.prior == "wavelet":
r = t.mwg_wavelet(alpha=args.alpha, M=args.samples_num, Madapt=args.adapt_num,type=args.wave,levels=args.levels,thinning=args.thinning)
elif args.sampler == "map":
if args.prior == "cauchy":
r = t.map_cauchy(alpha=args.alpha,maxiter=125)
elif args.prior == "tv":
r = t.map_tv(alpha=args.alpha,maxiter=125)
elif args.prior == "wavelet":
r = t.map_wavelet(alpha=args.alpha,type=args.wave,levels=args.levels)
elif args.prior == "tikhonov":
r = t.map_tikhonov(alpha=args.alpha)
plt.imshow(r)
plt.show()
elif len(sys.argv) > 1 and (args.omit is False) and (args.dataload is True):
import scipy.io
from scipy.stats import zscore
#m = scipy.io.loadmat('Walnut.mat')['FBP1200'].T
#m = resize(m, (328, 328), anti_aliasing=False, preserve_range=True,order=1, mode='symmetric')
#t = tomography(targetsize=64,commonprefix='/isot')
#t.dataload('LotusData256.mat',"A",'LotusData256.mat','m')
#t.dataload('CheeseData_256x180.mat', "A", 'CheeseData_256x180.mat', 'm',imsize=256)
#t.dataload('WalnutData164.mat', "A", 'WalnutData164.mat', 'm')
#t.lhsigmsq = 0.05
#t.Q = argumentspack(M=t.radonoperator, y=t.lines, b=0.01, s2=0.05)
#t.targetimage = np.random.randn(t.dim,t.dim)
#t.theta = np.array([0,90])
#r = t.hmcmc_tv(alpha=100, M=100, Madapt=20, thinning=1, retim=False, interstep=9, variant='ehmc')
#r=t.map_cauchy(alpha=0.01,retim=True)
#plt.imshow(r)
#plt.show()
# If we do not care the command line.
else:
#https://stackoverflow.com/questions/19189274/nested-defaultdict-of-defaultdict
from collections import defaultdict
import json
class NestedDefaultDict(defaultdict):
def __init__(self, *args, **kwargs):
super(NestedDefaultDict, self).__init__(NestedDefaultDict, *args, **kwargs)
def __repr__(self):
return repr(dict(self))
np.random.seed(1)
t = tomography("shepp.png", 128, 16, 0.02, crimefree=False)
res = t.map_cauchy(0.01, retim=True,isotropic=False)
plt.imshow(res)
plt.show()
plt.figure()
t = tomography("shepp.png", 64, 8, 0.02, crimefree=False)
res = t.mwg_cauchy(0.1, isotropic=True, mapstart=True, M=10000, Madapt=5000, retim=False, thinning=10,
interstep=1000000)
chain = res.chain
plt.plot(chain[10, :])
plt.plot(chain[100, :])
plt.figure()
plt.imshow(np.reshape(res.chain[:, 1], (64, 64)))
plt.show()
exit(0)
'''
angles = {'sparsestwhole': 10, 'sparsewhole': 30, 'whole': 90, 'sparsestlimited': (0, 90, 10),'sparselimited': (0, 90, 30), 'limited': (0, 90, 90)}
noises = ( 0.015,)
sizes = (512,)
alphas = np.geomspace(0.1,1000,15)
tikhoalpha = NestedDefaultDict()
for size in sizes:
for angletype,angle in angles.items():
for noise in noises:
bestl2 = np.Inf
best = 0
t = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
t2 = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
for alpha in alphas:
res = t.map_tikhonov(alpha, retim=False,maxiter=500)
res2 = t2.map_tikhonov(alpha, retim=False, maxiter=500)
if ((res.l2 + res2.l2)/2.0 < bestl2):
best = alpha
bestl2 = (res.l2 + res2.l2)/2.0
tikhoalpha[angletype][size][noise] = best
jsontik = json.dumps(tikhoalpha)
f = open("tikhonov.json", "w")
f.write(jsontik)
f.close()
print(tikhoalpha)
alphas = np.geomspace(0.1, 1000, 15)
tvalpha = NestedDefaultDict()
for size in sizes:
for angletype, angle in angles.items():
for noise in noises:
bestl2 = np.Inf
best = 0
t = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
t2 = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
for alpha in alphas:
res = t.map_tv(alpha, retim=False, maxiter=500)
res2 = t2.map_tv(alpha, retim=False, maxiter=500)
if ((res.l2 + res2.l2) / 2.0 < bestl2):
best = alpha
bestl2 = (res.l2 + res2.l2) / 2.0
tvalpha[angletype][size][noise] = best
jsontv = json.dumps(tvalpha)
f = open("tv.json", "w")
f.write(jsontv)
f.close()
print(tvalpha)
alphas = np.geomspace(0.000001, 5, 15)
cauchyalpha = NestedDefaultDict()
for size in sizes:
for angletype, angle in angles.items():
for noise in noises:
bestl2 = np.Inf
best = 0
t = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
t2 = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
for alpha in alphas:
res = t.map_cauchy(alpha, retim=False, maxiter=500)
res2 = t2.map_cauchy(alpha, retim=False, maxiter=500)
if ((res.l2 + res2.l2) / 2.0 < bestl2):
best = alpha
bestl2 = (res.l2 + res2.l2) / 2.0
cauchyalpha[angletype][size][noise] = best
jsoncau= json.dumps(cauchyalpha)
f = open("cauchy.json", "w")
f.write(jsoncau)
f.close()
print(cauchyalpha)
alphas = np.geomspace(0.01, 1000, 15)
haaralpha = NestedDefaultDict()
for size in sizes:
for angletype, angle in angles.items():
for noise in noises:
bestl2 = np.Inf
best = 0
t = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
t2 = tomography("slices/299.mat", size, angle, noise, crimefree=True, commonprefix='/results/')
for alpha in alphas:
res = t.map_wavelet(alpha, type='haar', retim=False, maxiter=500)
res2 = t2.map_wavelet(alpha, type='haar', retim=False, maxiter=500)
if ((res.l2 + res2.l2) / 2.0 < bestl2):
best = alpha
bestl2 = (res.l2 + res2.l2) / 2.0
haaralpha[angletype][size][noise] = best
jsonhaar = json.dumps(haaralpha)
f = open("haar.json", "w")
f.write(jsonhaar)
f.close()
print(haaralpha)
exit(0)
tikhoalpha = {"sparsestwhole": {512: {0.015: 10.0}}, "sparsewhole": {512: {0.015: 10.0}}, "whole": {512: {0.015: 10.0}}, "sparsestlimited": {512: {0.015: 0.372759372031494}}, "sparselimited": {512: {0.015: 0.7196856730011519}}, "limited": {512: {0.015: 2.6826957952797246}}}
tvalpha = {"sparsestwhole": {512: {0.015: 0.7196856730011519}}, "sparsewhole": {512: {0.015: 1.3894954943731375}}, "whole": {512: {0.015: 2.6826957952797246}}, "sparsestlimited": {512: {0.015: 0.1}}, "sparselimited": {512: {0.015: 0.372759372031494}}, "limited": {512: {0.015: 0.372759372031494}}}
haaralpha = {"sparsestwhole": {512: {0.015: 1.3894954943731375}}, "sparsewhole": {512: {0.015: 1.3894954943731375}}, "whole": {512: {0.015: 3.1622776601683795}}, "sparsestlimited": {512: {0.015: 0.2682695795279726}}, "sparselimited": {512: {0.015: 1.3894954943731375}}, "limited": {512: {0.015: 1.3894954943731375}}}
cauchyalpha = {"sparsestwhole": {512: {0.015: 0.0014142135623730952}}, "sparsewhole": {512: {0.015: 0.003986470631277378}}, "whole": {512: {0.015: 0.031676392175331615}}, "sparsestlimited": {512: {0.015: 2.0}}, "sparselimited": {512: {0.015: 0.7095065752033103}}, "limited": {512: {0.015: 0.25169979012836524}}}
noises = (0.015,)
sizes = (512,)
angles = {'sparsewhole': 30, 'whole': 90}
for | |
'861594995':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861594996':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861594997':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861594998':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861594999':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'86159500':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595010':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595011':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595012':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595013':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595014':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595015':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595016':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595017':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595018':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595019':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86159502':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595030':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595031':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595032':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595033':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595034':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595035':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595036':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595037':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595038':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595039':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595040':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595041':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595042':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595043':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595044':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595045':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595046':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595047':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595048':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595049':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159505':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595060':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595061':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595062':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595063':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595064':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595065':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595066':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595067':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595068':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595069':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'86159507':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'86159508':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'86159509':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'86159510':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861595104':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861595105':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861595106':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595109':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595110':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595111':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595112':{'en': '<NAME>su', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595113':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595114':{'en': 'Ta<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861595115':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861595116':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861595117':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861595118':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595119':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595120':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861595121':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861595122':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861595123':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861595124':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595125':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861595126':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595127':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861595128':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861595129':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595130':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595131':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595132':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595133':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595134':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595135':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595136':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595137':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595138':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595139':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861595141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861595143':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861595144':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861595145':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595146':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861595147':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861595148':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595149':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861595150':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595151':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595152':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595153':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861595154':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595155':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861595156':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595157':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595158':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595159':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'86159516':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159517':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159518':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159519':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159520':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86159521':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'86159522':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'86159523':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'86159524':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861595246':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595247':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595248':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861595249':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86159525':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'86159526':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'86159527':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'86159528':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'86159529':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'86159530':{'en': 'He<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861595308':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595309':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'86159531':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'86159532':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'86159533':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861595338':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861595339':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595340':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595341':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595342':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595343':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595344':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595345':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595346':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595347':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595348':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595349':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86159535':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'86159536':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861595370':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595371':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595372':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595373':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595374':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595375':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595376':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595377':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595378':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595379':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'86159538':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861595386':{'en': 'We<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595387':{'en': 'We<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595388':{'en': 'We<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595389':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'86159539':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861595400':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595401':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595402':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595403':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595404':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595405':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595406':{'en': 'Weihai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5a01\u6d77\u5e02')},
'861595407':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861595408':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861595409':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861595410':{'en': 'Jinan, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595411':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595412':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595413':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595414':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595415':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595416':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595417':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595418':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595419':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'86159542':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'86159543':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861595438':{'en': 'Linyi, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861595439':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'86159544':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'86159545':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595455':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595456':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595457':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595458':{'en': 'Liaocheng, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'86159546':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861595469':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595470':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595471':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'861595472':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861595473':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861595474':{'en': 'Rizhao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u65e5\u7167\u5e02')},
'861595475':{'en': 'Dongying, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861595476':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861595477':{'en': 'Dezhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u5fb7\u5dde\u5e02')},
'861595478':{'en': 'Jinan, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595479':{'en': 'Zibo, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'86159548':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861595490':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595491':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5357\u5e02')},
'861595492':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595493':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595494':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u804a\u57ce\u5e02')},
'861595495':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595496':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595497':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595498':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'861595499':{'en': 'Yantai, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u70df\u53f0\u5e02')},
'86159550':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'86159551':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86159552':{'en': 'Bengbu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u868c\u57e0\u5e02')},
'86159553':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'86159554':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861595550':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861595551':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861595552':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861595553':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861595554':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861595555':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861595556':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861595557':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861595558':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861595559':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861595560':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861595561':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861595562':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861595563':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861595564':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861595565':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595566':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595567':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861595568':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861595569':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'86159557':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'86159558':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'86159559':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861595590':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861595600':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861595601':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861595602':{'en': 'Suzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5bbf\u5dde\u5e02')},
'861595603':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861595604':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861595605':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595606':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595607':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595608':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'861595609':{'en': 'Chuzhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6ec1\u5dde\u5e02')},
'86159561':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861595619':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861595620':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861595621':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861595622':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861595623':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595624':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595625':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595626':{'en': 'Xu<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861595627':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861595628':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861595629':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'86159563':{'en': 'Xu<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'86159564':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'86159565':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595660':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595661':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595662':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595663':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595664':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861595665':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861595666':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861595667':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861595668':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861595669':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'86159567':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861595670':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861595671':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'86159568':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'86159569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861595700':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861595701':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861595702':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861595703':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861595704':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861595705':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861595706':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861595707':{'en': 'Zhoushan, Zhejiang', | |
:param title_desc: boolean that indicate group info sync
:param contributors: boolean that indicate contribuors
:param lock_node: True ... use locker.lock_node()
:return: True on success, False on sync skip condition
'''
logger.debug('mapcore_sync_map_group(\'' + utf8(node.title) + '\') started.')
# check Node attribute
if node.is_deleted:
logger.info('Node is deleted. nothing to do.')
return False
try:
if lock_node:
locker.lock_node(node)
group_key = node.map_group_key
# sync group info
if title_desc:
mapcore_update_group(access_user, node, group_key)
logger.info('Node title [' + utf8(node.title) + '] and desctiption are synchronized to mAP group [' + utf8(group_key) + '].')
# sync members
if contributors:
rdm_members = []
for member in query_contributors(node):
rdmu = RDMmember(node, member)
rdm_members.append(rdmu)
# logger.debug('RDM contributor:\n' + pp(vars(rdmu)))
map_group = mapcore_get_extended_group_info(access_user, node, group_key)
map_members = map_group['group_member_list']
#logger.debug('mAP group info:\n' + pp(map_group))
#logger.debug('mAP group members: ' + pp(map_members))
add, delete, upgrade, downgrade = compare_members(rdm_members, map_members, True)
# add: RDMmember (unicode),
# delete: map_member (utf-8)
# upgrade: map_member
# downgrade: map_member
# apply members to mAP group
for u in add:
if u.is_admin:
admin = MAPCore.MODE_ADMIN
else:
admin = MAPCore.MODE_MEMBER
mapcore_add_to_group(access_user, node, group_key, u.eppn, admin)
logger.info('mAP group [' + map_group['group_name'] + '] get new member [' + utf8(u.eppn) + ']')
for u in delete:
eppn = u['eppn']
try:
user = OSFUser.objects.get(eppn=eppn)
except Exception as e:
user = None
logger.info('User(eppn={}) does not exist in GRDM. The user is not removed from the mAP group({}).'.format(eppn, map_group['group_name']))
add_log(NodeLog.MAPCORE_RDM_UNKNOWN_USER,
node, access_user, e, save=True)
if user:
mapcore_remove_from_group(access_user, node, group_key, eppn)
logger.info('mAP group [' + map_group['group_name'] + '] member [' + eppn + '] is removed')
for u in upgrade:
mapcore_edit_member(access_user, node, group_key, u['eppn'], MAPCore.MODE_ADMIN)
logger.info('mAP group [' + map_group['group_name'] + '] admin [' + u['eppn'] + '] is a new member')
for u in downgrade:
mapcore_edit_member(access_user, node, group_key, u['eppn'], MAPCore.MODE_MEMBER)
logger.info('mAP group [' + map_group['group_name'] + '] member [' + u['eppn'] + '] is a new admin')
finally:
if lock_node:
locker.unlock_node(node)
return True
def mapcore_sync_map_group(access_user, node, title_desc=True, contributors=True, use_raise=False, lock_node=True):
    """Upload GRDM project info to its linked mAP group, with retry bookkeeping.

    Wraps mapcore_sync_map_group0() with error handling:
    - if the linked mAP group no longer exists, the GRDM project is removed
      and False is returned;
    - on any other failure the project is marked "standby to upload" so a
      later run retries, a NodeLog entry is recorded, and the exception is
      re-raised only when ``use_raise`` is True;
    - on success the "standby to upload" mark is cleared.

    :param access_user: OSFUser performing the mAP API calls
    :param node: Node (GRDM project) to upload
    :param title_desc: sync group title/description when True
    :param contributors: sync group members when True
    :param use_raise: re-raise upload errors instead of returning False
    :param lock_node: passed through to mapcore_sync_map_group0()
    :return: True on success, False on skip/failure
    """
    try:
        try:
            ret = mapcore_sync_map_group0(access_user, node, title_desc=title_desc, contributors=contributors, lock_node=lock_node)
        except MAPCoreException as e:
            if e.group_does_not_exist():
                logger.info('GRDM project [{} ({})] is deleted because linked mAP group does not exist.'.format(utf8(node.title), node._id))
                remove_node(node)
                return False
            raise
    except Exception as e:
        logger.warning('GRDM project [{} ({})] cannot be uploaded to mAP. (retry later), reason={}'.format(utf8(node.title), node._id, utf8(str(e))))
        add_log(NodeLog.MAPCORE_MAP_GROUP_NOT_UPDATED, node, access_user, e,
                save=True)
        try:
            mapcore_set_standby_to_upload(node)  # retry later
        except Exception:
            import traceback
            logger.warning('mapcore_set_standby_to_upload error: {}'.format(traceback.format_exc()))
        if use_raise:
            raise
        return False
    if ret:  # OK
        try:
            mapcore_unset_standby_to_upload(node)
        except Exception:
            import traceback
            # BUG FIX: this message previously said "set" although the
            # failing call is mapcore_unset_standby_to_upload().
            logger.warning('mapcore_unset_standby_to_upload error: {}'.format(traceback.format_exc()))
    return ret
def mapcore_url_is_my_projects(request_url):
    """Return True when *request_url* points at the dashboard or "my projects" page."""
    request_path = urlparse(request_url).path
    return any(request_path == urlparse(web_url_for(page_name)).path
               for page_name in ('dashboard', 'my_projects'))
def mapcore_sync_rdm_my_projects0(user):
    '''
    Compare the GRDM projects the user belongs to with the user's mAP groups.

    Belongs to the group on both RDM and mAP:
        title unchanged: do nothing
        title changed: propagate the title to the RDM (or the mAP) side
    Belongs only to the mAP group:
        a corresponding project exists on the RDM side:
            i.e. contributors are inconsistent
            propagate to RDM (or to mAP when mAP-side info should be updated)
        no corresponding project on the RDM side:
            create a project on RDM and fill it from the mAP group info
    Belongs only to the RDM project:
        group_key is not set:
            i.e. not yet linked to a mAP group;
            a mAP group is created when the project page is visited,
            so nothing is done here
        group_key is set:
            (handled below via mapcore_sync_rdm_project)
            the group exists on the mAP side:
                i.e. contributors are inconsistent (membership differs)
                propagate to RDM (or to mAP when mAP-side info should be updated)
            the group does not exist on the mAP side:
                mark the project is_deleted=True

    :param user: OSFUser
    :return: None. Raises an exception on error.
    '''
    logger.debug('starting mapcore_sync_rdm_my_projects(\'' + user.eppn + '\').')
    try:
        locker.lock_user(user)
        # index the user's RDM projects that are already linked to a mAP group
        my_rdm_projects = {}
        sync_id_list = []
        for project in Node.objects.filter(contributor__user__id=user.id):
            if project.map_group_key:
                my_rdm_projects[project.map_group_key] = project
            # if project.map_group_key is None:
            #     ... This project will be synchronized in _view_project()
        mapcore = MAPCore(user)
        result = mapcore.get_my_groups()
        # walk the mAP side: create/sync RDM projects from the user's groups
        my_map_groups = {}
        for grp in result['result']['groups']:
            group_key = grp['group_key']
            # NOTE: registered before the active/public checks, so inactive
            # groups are still visible to the RDM-side loop below
            my_map_groups[group_key] = grp
            if not grp['active'] or not grp['public']:
                logger.warning('mAP group [' + grp['group_name'] + '] has unsuitable attribute(s). (ignored)')
                continue
            if mapcore_group_member_is_private(grp):
                logger.warning('mAP group( {} ) member list is private. (skipped)'.format(grp['group_name']))
                continue
            logger.debug('mAP group [' + grp['group_name'] + '] (' + grp['group_key'] + ') is a candidate to Sync.')
            project_exists = False
            try:
                node = Node.objects.get(map_group_key=group_key)
                project_exists = True
                # exists in RDM and mAP
            except ObjectDoesNotExist as e:
                # exists only in mAP -> create new Node in RDM
                try:
                    node = mapcore_create_new_node_from_mapgroup(mapcore, grp)
                    if node is None:
                        logger.error('cannot create GRDM project for mAP group [' + grp['group_name'] + ']. skip.')
                        continue
                    #logger.info('New node is created from mAP group [' + grp['group_name'] + '] (' + group_key + ')')
                    # copy info and members to RDM
                    mapcore_sync_rdm_project(user, node,
                                             title_desc=True,
                                             contributors=True,
                                             use_raise=True)
                except MAPCoreException as e:
                    if e.group_does_not_exist():
                        # This group is not linked to this RDM SP.
                        # Other SPs may have the group.
                        del my_map_groups[group_key]
                        logger.info('mAP group({}, group_key={}) exists but it is not linked to this GRDM service provider.'.format(grp['group_name'], group_key))
                    else:
                        logger.debug('MAPCoreException: {}'.format(utf8(str(e))))
                        raise
            if project_exists and not node.is_deleted:
                # different contributors or title
                if my_rdm_projects.get(group_key) is None:
                    logger.debug('different contributors: group_key={}'.format(node.map_group_key))
                    mapcore_sync_rdm_project_or_map_group(user, node)
                    sync_id_list.append(node._id)
                elif node.title != utf8dec(grp['group_name']):
                    #logger.debug('node.title={}, grp[group_name]={}'.format(utf8(node.title), grp['group_name']))
                    logger.debug('different title: group_key={}'.format(node.map_group_key))
                    mapcore_sync_rdm_project_or_map_group(user, node)
                    sync_id_list.append(node._id)
        # walk the RDM side: sync linked projects not already handled above
        for group_key, project in my_rdm_projects.items():
            if project.is_deleted:
                #logger.info('GRDM project [{} ({})] was deleted. (skipped)'.format(utf8(project.title), project._id))
                continue
            if project._id in sync_id_list:
                continue
            grp = my_map_groups.get(group_key)
            if grp:
                if project.title == utf8dec(grp['group_name']):
                    # already synchronized project
                    continue
                else:
                    logger.debug('different title')
                    mapcore_sync_rdm_project_or_map_group(user, project)
            else:
                # Project contributors is different from mAP group members.
                mapcore_sync_rdm_project_or_map_group(user, project)
        ### to create new mAP groups at /myprojects/
        # for project in Node.objects.filter(contributor__user__id=user.id):
        #     if project.map_group_key is None:
        #         mapcore_sync_rdm_project_or_map_group(user, project)
    finally:
        locker.unlock_user(user)
    logger.debug('mapcore_sync_rdm_my_projects finished.')
def mapcore_sync_rdm_my_projects(user, use_raise=False):
    """Best-effort wrapper: sync the user's projects, logging any failure.

    When ``use_raise`` is True the caught exception is re-raised after logging.
    """
    try:
        mapcore_sync_rdm_my_projects0(user)
    except Exception as err:
        logger.error('User(username={}, eppn={}) cannot compare my GRDM Projects and my mAP groups, reason={}'.format(user.username, user.eppn, utf8(str(err))))
        if use_raise:
            raise
def mapcore_set_standby_to_upload(node, log=True):
    """Mark the node so its info is uploaded to mAP at the next sync.

    :param node: Node to mark
    :param log: when True, also log an info message about the pending upload
    """
    with transaction.atomic():
        # re-fetch with a row lock so concurrent updates cannot race
        n = Node.objects.select_for_update().get(guids___id=node._id)
        n.mapcore_standby_to_upload = timezone.now()
        n.save()
        if log:
            logger.info('Project({}) will be uploaded to mAP (next time).'.format(node._id))
        # NOTE(review): this second line appears unconditional in SOURCE —
        # confirm it was not meant to be gated by `log` as well
        logger.info('Project({}).mapcore_standby_to_upload={}'.format(node._id, n.mapcore_standby_to_upload))
    # refresh the caller's instance with the committed value
    node.reload()
def mapcore_is_on_standby_to_upload(node):
    """Return True when the node is marked for a pending upload to mAP."""
    with transaction.atomic():
        locked = Node.objects.select_for_update().get(guids___id=node._id)
        return locked.mapcore_standby_to_upload is not None
def mapcore_unset_standby_to_upload(node):
    """Clear the "upload to mAP later" mark from the node."""
    with transaction.atomic():
        locked = Node.objects.select_for_update().get(guids___id=node._id)
        locked.mapcore_standby_to_upload = None
        locked.save()
        logger.debug('Project({}).mapcore_standby_to_upload=None'.format(node._id))
    node.reload()
# Minimum interval between consecutive syncs of the same node; see
# mapcore_is_sync_time_expired().
SYNC_CACHE_TIME = 10  # sec.
def mapcore_clear_sync_time(node):
    """Forget the node's last-sync timestamp; errors are logged and ignored."""
    try:
        with transaction.atomic():
            locked = Node.objects.select_for_update().get(guids___id=node._id)
            locked.mapcore_sync_time = None
            locked.save()
        node.reload()
    except Exception as err:
        # best-effort: a failed cache reset must not break the caller
        logger.error('mapcore_clear_sync_time: {}'.format(utf8(str(err))))
def mapcore_set_sync_time(node):
    """Record "now" as the node's last-sync timestamp; errors are logged and ignored."""
    try:
        with transaction.atomic():
            locked = Node.objects.select_for_update().get(guids___id=node._id)
            locked.mapcore_sync_time = timezone.now()
            locked.save()
        node.reload()
    except Exception as err:
        # best-effort: a failed cache update must not break the caller
        logger.error('mapcore_set_sync_time: {}'.format(utf8(str(err))))
def mapcore_is_sync_time_expired(node):
    """Return True when the node's sync cache is stale and a sync is needed."""
    last_sync = node.mapcore_sync_time
    if last_sync is None:
        return True
    expired = timezone.now() >= last_sync + datetime.timedelta(seconds=SYNC_CACHE_TIME)
    if expired:
        logger.debug('mapcore_is_sync_time_expired: need sync')
    else:
        logger.debug('mapcore_is_sync_time_expired: skip sync')
    return expired
def mapcore_sync_rdm_project_or_map_group0(access_user, node, use_raise=False):
    """Synchronize one project/group pair in the appropriate direction.

    - deleted node: nothing to do
    - sync cache still fresh (mapcore_is_sync_time_expired is False): skipped
    - no linked mAP group yet: create one, then upload RDM info to it
    - upload pending ("standby to upload"): upload RDM info to mAP
    - otherwise: download mAP group info into the RDM project

    The caller must hold the node lock (see
    mapcore_sync_rdm_project_or_map_group), hence lock_node=False below.
    """
    if node.is_deleted:
        return
    if not mapcore_is_sync_time_expired(node):
        return  # skipped
    if node.map_group_key is None:
        # not linked yet: create the mAP group first, then upload to it
        admin_user = get_one_admin(node)
        group_key = mapcore_sync_map_new_group(admin_user, node,
                                               use_raise=use_raise)
        if group_key:
            node.map_group_key = group_key
            node.save()
            mapcore_sync_map_group(access_user, node,
                                   title_desc=True, contributors=True,
                                   use_raise=use_raise, lock_node=False)
    elif mapcore_is_on_standby_to_upload(node):
        mapcore_sync_map_group(access_user, node,
                               title_desc=True, contributors=True,
                               use_raise=use_raise, lock_node=False)
    else:
        mapcore_sync_rdm_project(access_user, node,
                                 title_desc=True, contributors=True,
                                 use_raise=use_raise, lock_node=False)
    mapcore_set_sync_time(node)
def mapcore_sync_rdm_project_or_map_group(access_user, node, use_raise=False):
    """Serialize sync for a node: lock it, sync, always unlock."""
    try:
        locker.lock_node(node)
        mapcore_sync_rdm_project_or_map_group0(
            access_user, node, use_raise=use_raise)
    finally:
        locker.unlock_node(node)
#
# debugging utilities
#
# add a contributor to a project
def add_contributor_to_project(node_name, eppn):
    """Debug helper: add the user identified by *eppn* as a contributor to the
    project whose title is *node_name*.

    Prints diagnostics instead of raising; intended for interactive use only.
    Note: add_contributor() is called with save=False, so the change is not
    persisted here.
    """
    from osf.utils.permissions import DEFAULT_CONTRIBUTOR_PERMISSIONS
    # get node object
    try:
        node = Node.objects.get(title=node_name)
    except Exception as e:
        print(type(e))
        print(e)
        return
    # get user object
    try:
        user = OSFUser.objects.get(eppn=eppn)
    except Exception as e:
        print(type(e))
        print(e)
        return
    # add user as contributor
    if node.is_contributor(user):
        print('user is already joined')  # BUG FIX: message typo 'joind'
        return
    ret = node.add_contributor(user, log=True, save=False, permissions=DEFAULT_CONTRIBUTOR_PERMISSIONS)
    # BUG FIX: add_contributor() does not necessarily return a string, so the
    # original '+ ret' concatenation could raise TypeError; also fixed the
    # 'contoributor' typo in the message.
    print('add_contributor returns: ' + str(ret))
    return
def user_lock_test(user, sleep_sec):
    """Debug helper: take the user lock, hold it for *sleep_sec* seconds, release it."""
    guid = user._id
    try:
        locker.lock_user(user)
        print('User (GUID: ' + guid + ') is locked.')
        print('locked: sleep {}'.format(sleep_sec))
        time.sleep(sleep_sec)
    except KeyboardInterrupt:
        print('interrupted!')
    except Exception:
        pass
    finally:
        locker.unlock_user(user)
        print('User (GUID: ' + guid + ') is unlocked.')
def node_lock_test(node, sleep_sec):
    """Debug helper: take the node lock, hold it for *sleep_sec* seconds, release it."""
    guid = node._id
    try:
        locker.lock_node(node)
        print('Node (GUID=' + guid + ') is locked.')
        print('locked: sleep {}'.format(sleep_sec))
        time.sleep(sleep_sec)
    except KeyboardInterrupt:
        print('interrupted!')
    except Exception:
        pass
    finally:
        locker.unlock_node(node)
        print('Node (GUID=' + guid + ') is unlocked.')
if __name__ == '__main__':
print('In Main')
if False:
# API呼び出しの権限テスト
me1 = OSFUser.objects.get(eppn=sys.argv[2])
map1 = MAPCore(me1)
me2 = OSFUser.objects.get(eppn=sys.argv[3])
map2 = MAPCore(me2)
me3 = OSFUser.objects.get(eppn=sys.argv[4])
map3 = MAPCore(me3)
try:
res = map1.get_group_by_key(sys.argv[1])
grp1 = res['result']['groups'][0]
gk1 = grp1['group_key']
print('Title [' + grp1['group_name'] + '], Key [' + grp1['group_key'] + '], by user [' + me1.eppn + ']')
res = map1.get_group_members(gk1)
for mem in res['result']['accounts']:
print('ePPN [' + mem['eppn'] + '], Account [' + mem['account_name'] + ']')
except Exception as e:
print(e.message)
try:
res = map2.get_group_by_key(sys.argv[1])
grp2 = res['result']['groups'][0]
gk2 = grp2['group_key']
print('Title [' + grp2['group_name'] + '], Key [' + grp2['group_key'] + '], by user [' + me2.eppn + ']')
res = map2.get_group_members(gk2)
for mem in res['result']['accounts']:
print('ePPN [' + mem['eppn'] + '], Account [' + mem['account_name'] + ']')
except Exception as e:
print(e.message)
try:
res = map3.get_group_by_key(sys.argv[1])
grp3 = res['result']['groups'][0]
gk3 | |
root guard on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the root guard on the interfaces'
'disable Disable the root guard on the interfaces'
),
}
],
},
{
'command': 'no spanning-tree root-guard {action}',
'doc': 'Enable/Disable the root guard on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the root guard on the interfaces'
'disable Disable the root guard on the interfaces'
),
'optional': True
}
],
},
{
'command': 'spanning-tree loop-guard {action}',
'doc': 'Enable/Disable the loop guard on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the loop guard on the interfaces'
'disable Disable the loop guard on the interfaces'
),
}
],
},
{
'command': 'no spanning-tree loop-guard {action}',
'doc': 'Enable/Disable the loop guard on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the loop guard on the interfaces'
'disable Disable the loop guard on the interfaces'
),
'optional': True
}
],
},
{
'command': 'spanning-tree bpdu-filter {action}',
'doc': 'Enable/Disable the bpdu filter on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the bpdu filter on the interfaces'
'disable Disable the bpdu filter on the interfaces'
),
}
],
},
{
'command': 'no spanning-tree bpdu-filter {action}',
'doc': 'Enable/Disable the bpdu filter on the interfaces',
'arguments': [
{
'name': 'action',
'doc': (
'enable Enable the bpdu filter on the interfaces'
'disable Disable the bpdu filter on the interfaces'
),
'optional': True
}
],
},
{
'command': 'spanning-tree instance {instance_id} '
'cost {cost}',
'doc': 'Specify a standard to use when calculating '
'the default pathcost',
'arguments': [
{
'name': 'instance_id',
'doc': 'Specifies the MSTP instance number <1-64>',
},
{
'name': 'cost',
'doc': 'Path cost range <1-200000000>',
}
],
},
{
'command': 'no spanning-tree instance {instance_id} '
'cost {cost}',
'doc': 'Specify a standard to use when calculating '
'the default pathcost',
'arguments': [
{
'name': 'instance_id',
'doc': 'Specifies the MSTP instance number <1-64>',
},
{
'name': 'cost',
'doc': 'Path cost range <1-200000000>',
'optional': True
}
],
},
{
'command': 'spanning-tree instance {instance_id} '
'port-priority {priority}',
'doc': 'Maps the priority to corresponding instance',
'arguments': [
{
'name': 'instance_id',
'doc': 'Specifies the MSTP instance number <1-64>',
},
{
'name': 'priority',
'doc': 'The device priority multiplier for '
'the MST instance <0-15>',
}
],
},
{
'command': 'no spanning-tree instance {instance_id} '
'port-priority {priority}',
'doc': 'Removes the port-priority from the MSTP instance',
'arguments': [
{
'name': 'instance_id',
'doc': 'Specifies the MSTP instance number <1-64>',
},
{
'name': 'priority',
'doc': 'The device priority multiplier for '
'the MST instance <0-15>',
'optional': True
}
],
},
{
'command': 'apply qos schedule-profile \
{schedule_profile_name}',
'doc': 'Apply qos profiles on an interface.',
'arguments': [
{
'name': 'schedule_profile_name',
'doc': 'The schedule profile to apply.'
}
],
},
{
'command': 'no apply qos schedule-profile',
'doc': 'Clears qos profiles from an interface.',
'arguments': [
{
'name': 'schedule_profile_name',
'doc': 'The schedule profile to clear.',
'optional': True
}
],
},
{
'command': 'qos dscp {dscp_map_index}',
'doc': 'Set the dscp override for the port.',
'arguments': [
{
'name': 'dscp_map_index',
'doc': 'The index into the dscp map.'
}
],
},
{
'command': 'no qos dscp',
'doc': 'Remove the dscp override for the port.',
'arguments': [],
},
{
'command': 'qos trust {value}',
'doc': 'Set the qos trust mode for the port.',
'arguments': [
{
'name': 'value',
'doc': 'The qos trust mode to set.'
}
],
},
{
'command': 'no qos trust',
'doc': 'Remove the qos trust mode for the port.',
'arguments': [],
},
{
'command': 'apply access-list {type} {access_list} '
'{direction}',
'doc': 'Apply ACL on interface',
'arguments': [
{
'name': 'type',
'doc': 'Access-list type (e.g., ip or ipv6).'
},
{
'name': 'access_list',
'doc': 'Access-list name.'
},
{
'name': 'direction',
'doc': 'Apply to this traffic direction (in | out).'
}
],
},
{
'command': 'no apply access-list {type} {access_list} '
'{direction}',
'doc': 'Remove ACL from interface',
'arguments': [
{
'name': 'type',
'doc': 'Access-list type (e.g., ip or ipv6).'
},
{
'name': 'access_list',
'doc': 'Access-list name.'
},
{
'name': 'direction',
'doc': 'Apply to this traffic direction (in | out).'
}
],
},
{
'command': 'apply access-list ip {acl_name} in',
'doc': 'Apply ACL on interface to ingress traffic',
'arguments': [
{
'name': 'acl_name',
'doc': 'Access-list name'
}
],
},
{
'command': 'no apply access-list ip {acl_name} in',
'doc': 'Apply no ACL on interface to ingress traffic',
'arguments': [
{
'name': 'acl_name',
'doc': 'Access-list name'
}
],
},
{
'command': 'apply access-list ip {acl_name} out',
'doc': 'Apply ACL on interface to egress traffic',
'arguments': [
{
'name': 'acl_name',
'doc': 'Access-list name'
}
],
},
{
'command': 'no apply access-list ip {acl_name} out',
'doc': 'Apply no ACL on interface to egress traffic',
'arguments': [
{
'name': 'acl_name',
'doc': 'Access-list name'
}
],
},
{
'command': 'vrrp {grpid} address-family {af}',
'doc': 'Set VRRP virtual router id and address-family',
'arguments': [
{
'name': 'grpid',
'doc': 'Virtual router id <1-255>',
},
{
'name': 'af',
'doc': 'Address family <ipv4|ipv6>'
},
],
},
{
'command': 'no vrrp {grpid} address-family {af}',
'doc': 'Remove VRRP virtual router id and address-family',
'arguments': [
{
'name': 'grpid',
'doc': 'Virtual router id <1-255>',
},
{
'name': 'af',
'doc': 'Address family <ipv4|ipv6>'
},
],
},
{
'command': 'mtu {mtu_size}',
'doc': 'Set MTU',
'arguments': [
{
'name': 'mtu_size',
'doc': 'MTU in bytes range <576-9192>'
}
],
},
{
'command': 'no mtu',
'doc': 'Unset MTU',
'arguments': [],
}
]
}),
('config_subinterface', {
'doc': 'Sub-Interface configuration.',
'arguments': [
{
'name': 'portlbl',
'doc': 'Label that identifies a physical interface.'
},
{
'name': 'subint',
'doc': 'Label that identifies a subinterface.'
}
],
'pre_commands': ['config terminal', 'interface {port}.{subint}'],
'post_commands': ['end'],
'commands': [
{
'command': 'ip address {ipv4}',
'doc': 'Set IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Subinterface IP address.',
},
],
},
{
'command': 'no ip address {ipv4}',
'doc': 'Unset IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Subinterface IP address.',
},
],
},
{
'command': 'vrf attach {vrf_name}',
'doc': 'Mapping port to vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Mapping the port to vrf.',
},
],
},
{
'command': 'no vrf attach {vrf_name}',
'doc': 'Unmapping port from vrf',
'arguments': [
{
'name': 'vrf_name',
'doc': 'Unmapping the port from vrf.',
},
],
},
{
'command': 'ipv6 address {ipv6}',
'doc': 'Set IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Subinterface IPv6 address',
},
],
},
{
'command': 'no ipv6 address {ipv6}',
'doc': 'Unset IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Subinterface IPv6 address',
},
],
},
{
'command': 'encapsulation dot1Q {vlan_id}',
'doc': 'Set encapsulation type for a subinterface',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier.',
},
],
},
{
'command': 'no encapsulation dot1Q {vlan_id}',
'doc': 'Unset encapsulation type for a subinterface',
'arguments': [
{
'name': 'vlan_id',
'doc': '<1-4094> VLAN identifier.',
},
],
},
{
'command': 'shutdown',
'doc': 'Enable a subinterface.',
'arguments': [],
},
{
'command': 'no shutdown',
'doc': 'Disable a subinterface.',
'arguments': [],
},
]
}),
('config_interface_vlan', {
'doc': 'VLAN configuration.',
'arguments': [
{
'name': 'vlan_id',
'doc': 'Vlan id within <1-4094> and should not'
'be an internal vlan.'
}
],
'pre_commands': ['config terminal', 'interface vlan {vlan_id}'],
'post_commands': ['end'],
'commands': [
{
'command': 'ip address {ipv4}',
'doc': 'Set IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'no ip address {ipv4}',
'doc': 'Unset IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'ip address {ipv4} secondary',
'doc': 'Set secondary IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'no ip address {ipv4} secondary',
'doc': 'Unset secondary IP address',
'arguments': [
{
'name': 'ipv4',
'doc': 'A.B.C.D/M Interface IP address.',
},
],
},
{
'command': 'ipv6 address {ipv6}',
'doc': 'Set IPv6 address',
'arguments': [
{
'name': 'ipv6',
'doc': 'X:X::X:X/M Interface IPv6 address',
},
],
},
| |
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import datetime
import json
import logging
import os
import pickle
import threading
import time
import urllib
import uuid
from functools import wraps
import google.appengine.api.users
import webapp2
from google.appengine.api import taskqueue
from google.appengine.api.datastore_errors import TransactionFailedError
from google.appengine.api.images.images_stub import ImagesServiceStub
from google.appengine.datastore.datastore_rpc import TransactionOptions, ConfigOption
from google.appengine.ext import db, deferred
import influxdb
from influxdb.resultset import ResultSet
SERVER_SOFTWARE = os.environ.get("SERVER_SOFTWARE", "Development")
DEBUG = SERVER_SOFTWARE.startswith('Development')
# THIS PIECE OF CODE MUST BE ON TOP BECAUSE IT MONKEY PATCHES THE BUILTIN USER CLASS
# START MONKEY PATCH
def email_lower(email):
    """Normalise an email address to a lower-cased unicode string.

    Accepts ``None`` (returned unchanged), an App Engine ``users.User``
    (its ``email()`` is used), or any string-like value.
    """
    if email is None:
        return None
    if isinstance(email, google.appengine.api.users.User):
        email = email.email()
    if not isinstance(email, unicode):
        email = unicode(email)
    return email.lower()
original_constructor = google.appengine.api.users.User.__init__
def __init__(self, *args, **kwargs):
    # Patched users.User constructor: lower-case the email address (whether
    # passed positionally or as the 'email' keyword) before delegating to the
    # original constructor, so User equality/lookups are case-insensitive.
    if args:
        email = args[0]
        if email:
            lower_email = email_lower(email)
            if lower_email != email:
                # args is a tuple; convert to list to replace the email
                args = list(args)
                args[0] = lower_email
    else:
        email = kwargs.get('email', None)
        if email != None:
            lower_email = email_lower(email)
            if lower_email != email:
                kwargs['email'] = lower_email
    original_constructor(self, *args, **kwargs)
google.appengine.api.users.User.__init__ = __init__
# END MONKEY PATCH
# MONKEY PATCH logging
# Add possibility to bring down error levels for deferreds
class _TLocal(threading.local):
    """Per-thread holder for the error-suppression nesting depth."""
    def __init__(self):
        # Depth of nested suppression requests; > 0 downgrades errors to warnings.
        self.suppress = 0
_tlocal = _TLocal()
del _TLocal
def start_suppressing():
    # Increase the suppression depth for this thread; while > 0,
    # logging.error/critical/exception are downgraded to warnings
    # (see _new_error/_new_critical/_new_exception below).
    _tlocal.suppress += 1
def stop_suppressing():
    # Decrease the suppression depth, never going below zero.
    if _tlocal.suppress > 0:
        _tlocal.suppress -= 1
def reset_suppressing():
    # Unconditionally clear the suppression depth for this thread.
    _tlocal.suppress = 0
class suppressing(object):
    """Context manager that temporarily downgrades logging.error/critical to warnings."""
    def __enter__(self):
        start_suppressing()
        return self
    def __exit__(self, *args, **kwargs):
        stop_suppressing()
_orig_error = logging.error
_orig_critical = logging.critical
def _new_error(msg, *args, **kwargs):
    # Replacement for logging.error: while suppression is active, log as a
    # warning instead. Pass _suppress=False to force a real error regardless.
    suppress = kwargs.pop('_suppress', True)
    if _tlocal.suppress > 0 and suppress:
        logging.warning(msg, *args, **kwargs)
    else:
        _orig_error(msg, *args, **kwargs)
def _new_critical(msg, *args, **kwargs):
    # Replacement for logging.critical: while suppression is active, log as a
    # warning instead. Pass _suppress=False to force a real critical regardless.
    suppress = kwargs.pop('_suppress', True)
    if _tlocal.suppress > 0 and suppress:
        logging.warning(msg, *args, **kwargs)
    else:
        _orig_critical(msg, *args, **kwargs)
def _new_exception(msg, *args, **kwargs):
    # Replacement for logging.exception: same suppression behaviour as
    # _new_error, but always attaches the current exception info.
    suppress = kwargs.pop('_suppress', True)
    if _tlocal.suppress > 0 and suppress:
        logging.warning(msg, *args, exc_info=1, **kwargs)
    else:
        _orig_error(msg, *args, exc_info=1, **kwargs)
logging.error = _new_error
logging.critical = _new_critical
logging.exception = _new_exception
class StubsFilter(logging.Filter):
    """Filter that drops log records originating from stubs.py.

    Gets rid of the noisy 'Sandbox prevented access to file' warnings
    emitted by the App Engine stubs.
    """
    def filter(self, record):
        # Keep every record except those whose source file is stubs.py.
        return record.filename != 'stubs.py'
logging.root.addFilter(StubsFilter())
# MONKEY PATCH db
# Add possibility to run post-transaction actions
class __TLocal(threading.local):
    """Per-thread flag for requesting transaction propagation (db.ALLOWED)."""
    def __init__(self):
        # True while db.allow_transaction_propagation is in effect.
        self.propagation = False
_temp_transaction_options = __TLocal()
del __TLocal
# Keep a reference to the original implementation before patching.
_orig_run_in_transaction_options = db.run_in_transaction_options
# Names of all TransactionOptions configuration options (retries, propagation, ...).
_options = [attr for attr in dir(TransactionOptions) if isinstance(getattr(TransactionOptions, attr), ConfigOption)]
# Copy a TransactionOptions instance into a plain dict of option values.
_clone_options = lambda o: {attr: getattr(o, attr) for attr in _options}
# Option values of a default-constructed TransactionOptions.
_default_options = _clone_options(db.create_transaction_options())
def _wrap_run_in_transaction_func(is_retries, is_options):
    """Build a patched db.run_in_transaction* function.

    The wrapper adds: explicit retry handling for TransactionFailedError and
    db.Timeout (with a pacing sleep between attempts), optional transaction
    propagation (see db.allow_transaction_propagation), and bookkeeping for
    post-transaction actions keyed by a transaction GUID.

    :param is_retries: True if the wrapped signature takes a leading retry count.
    :param is_options: True if the wrapped signature takes leading TransactionOptions.
    """
    @wraps(_orig_run_in_transaction_options)
    def wrapped(*args, **kwargs):
        # Normalise the three public signatures onto an options dict.
        if is_options:
            options = _clone_options(args[0])
            args = args[1:]
        else:
            options = dict(_default_options)
        if is_retries:
            retries = args[0]
            args = args[1:]
        else:
            retries = options['retries']
        # Retries are handled here, not by the datastore RPC layer.
        options['retries'] = 0
        if options.get('propagation') is None and _temp_transaction_options.propagation:
            options['propagation'] = db.ALLOWED
        options = db.create_transaction_options(**options)
        if db.is_in_transaction():
            # Nested call: run directly inside the current transaction.
            return _orig_run_in_transaction_options(options, *args, **kwargs)
        if not retries:
            retries = 3
        def run(transaction_guid):
            max_tries = retries + 1
            count = 0
            while count < max_tries:
                count += 1
                start = time.time()
                try:
                    return _orig_run_in_transaction_options(options, *args, **kwargs)
                except (TransactionFailedError, db.Timeout) as e:
                    if isinstance(e, db.Timeout) and type(e) != db.Timeout:
                        raise e # only retrying in case of db.Timeout exceptions, not subclasses
                    if count == max_tries:
                        raise e
                    # Reset any queued post-transaction actions before retrying.
                    transactions.post_transaction_actions.reset(transaction_guid)
                    logging.info("%s: %s. Retrying... (%s)", e.__class__.__name__, e.message, count)
                    sleep_time = 1.1 - (time.time() - start)
                    if sleep_time > 0:
                        logging.info("Sleeping %s seconds ....", sleep_time)
                        time.sleep(sleep_time)
        # Imported here to avoid a circular import at module load time.
        from rogerthat.utils import transactions
        if db.is_in_transaction():
            transaction_guid = transactions.post_transaction_actions.get_current_transaction_guid()
        else:
            transaction_guid = str(uuid.uuid4())
            transactions.post_transaction_actions.set_current_transaction_guid(transaction_guid)
        try:
            r = run(transaction_guid)
        except:
            transactions.post_transaction_actions.finalize(success=False, transaction_guid=transaction_guid)
            raise
        try:
            transactions.post_transaction_actions.finalize(success=True, transaction_guid=transaction_guid)
        except:
            logging.error("Caught exception in rpc.transaction_done", exc_info=1, _suppress=False)
        return r
    return wrapped
db.run_in_transaction = _wrap_run_in_transaction_func(is_retries=False, is_options=False)
db.run_in_transaction_custom_retries = _wrap_run_in_transaction_func(is_retries=True, is_options=False)
db.run_in_transaction_options = _wrap_run_in_transaction_func(is_retries=False, is_options=True)
# END MONKEY PATCH
def _allow_transaction_propagation(func, *args, **kwargs):
    """Run *func* with transaction propagation (db.ALLOWED) enabled for this thread.

    The previous propagation flag is restored afterwards, even if *func* raises.
    """
    original_propagation_value = _temp_transaction_options.propagation
    _temp_transaction_options.propagation = True
    try:
        return func(*args, **kwargs)
    finally:
        _temp_transaction_options.propagation = original_propagation_value
db.allow_transaction_propagation = _allow_transaction_propagation
del _allow_transaction_propagation
# MONKEY PATCH json.dump & json.dumps to eliminate useless white space
_orig_json_dumps = json.dumps
def _new_json_dumps(*args, **kwargs):
    """json.dumps wrapper that defaults to compact separators (no whitespace)."""
    # In Python 2, 'separators' is the 8th positional parameter of json.dumps;
    # only apply the compact default when it was not passed positionally.
    if len(args) < 8:
        kwargs.setdefault("separators", (',', ':'))
    try:
        return _orig_json_dumps(*args, **kwargs)
    except Exception as e:
        # Log the offending arguments before re-raising for easier debugging.
        logging.debug(args)
        raise
json.dumps = _new_json_dumps
_orig_json_dump = json.dump
def _new_json_dump(*args, **kwargs):
    """json.dump wrapper that defaults to compact separators (no whitespace)."""
    # Same positional-argument guard as _new_json_dumps above.
    if len(args) < 8:
        kwargs.setdefault("separators", (',', ':'))
    return _orig_json_dump(*args, **kwargs)
json.dump = _new_json_dump
# END MONKEY PATCH
# MONKEY PATCH os.path.expanduser & os.path.expanduser to avoid using
# unspported os.path.getuserid
_orig_os_path_expanduser = os.path.expanduser
def _new_os_path_expanduser(path):
    # No-op replacement: the sandbox does not support user-id lookup,
    # so '~' prefixes are returned untouched.
    return path
os.path.expanduser = _new_os_path_expanduser
# END MONKEY PATCH
# MONKEY PATCH deferred.defer
_old_deferred_defer = deferred.defer
def _new_deferred_defer(obj, *args, **kwargs):
    """Patched deferred.defer: records the current user and keeps the
    transactional flag when the payload is too large for a task.

    Sets current user and fixes an issue where the transactional argument
    wasn't supplied when the task is too large.
    """
    from rogerthat.rpc import users
    from rogerthat.utils import get_backend_service
    from mcfw.consts import MISSING
    # Propagate the acting user into the task payload via the '__user' kwarg.
    if users.get_current_deferred_user() == MISSING:
        kwargs['__user'] = users.get_current_user()
    else:
        kwargs['__user'] = users.get_current_deferred_user()
    # Extract the taskqueue-specific '_xxx' kwargs before pickling.
    taskargs = dict((x, kwargs.pop(("_%s" % x), None))
                    for x in ("countdown", "eta", "name", "target",
                              "retry_options"))
    taskargs["url"] = kwargs.pop("_url", deferred.deferred._DEFAULT_URL)
    transactional = kwargs.pop("_transactional", False)
    taskargs["headers"] = dict(deferred.deferred._TASKQUEUE_HEADERS)
    taskargs["headers"].update(kwargs.pop("_headers", {}))
    queue = kwargs.pop("_queue", deferred.deferred._DEFAULT_QUEUE)
    pickled = deferred.serialize(obj, *args, **kwargs)
    if not taskargs["target"] and taskargs["countdown"] is None: # Don't increase too high otherwise keepalive_task will break
        taskargs["target"] = get_backend_service()
    try:
        if DEBUG:
            logging.debug('Scheduling task on queue %s: %s.%s\n%s\n%s',
                          queue,
                          obj.__module__, obj.__name__,
                          ''.join((', %s' % repr(a) for a in args)),
                          ''.join((', %s=%s' % (k, repr(v)) for k, v in kwargs.iteritems())))
        task = taskqueue.Task(payload=pickled, **taskargs)
        return task.add(queue, transactional=transactional)
    except taskqueue.TaskTooLargeError:
        # Payload too big: stash it in the datastore and defer a fetch-and-run.
        key = deferred.deferred._DeferredTaskEntity(data=pickled).put()
        pickled = deferred.deferred.serialize(deferred.deferred.run_from_datastore, str(key))
        task = taskqueue.Task(payload=pickled, **taskargs)
        # this is the patched line (transactional=transactional)
        return task.add(queue, transactional=transactional)
def _new_deferred_run(data):
    """Patched deferred.run: restores the user recorded by _new_deferred_defer,
    logs the call for debugging, and escalates to a real error after repeated
    task failures."""
    try:
        func, args, kwds = pickle.loads(data)
    except Exception, e:
        # Unpicklable payload can never succeed; fail the task permanently.
        raise deferred.PermanentTaskFailure(e)
    else:
        from rogerthat.rpc import users
        current_user = kwds.pop('__user', None)
        if current_user:
            users.set_deferred_user(current_user)
        try:
            from rogerthat.utils import get_current_queue, get_current_version
            if DEBUG:
                prefix = u'%s -> ' % get_current_version()
            else:
                prefix = u''
            logging.debug('%sQueue: %s\ndeferred.run(%s.%s%s%s)',
                          prefix,
                          get_current_queue(),
                          func.__module__, func.__name__,
                          "".join((",\n %s" % repr(a) for a in args)),
                          "".join((",\n %s=%s" % (k, repr(v)) for k, v in kwds.iteritems())))
        except:
            # Logging is best-effort; never let it break the actual task.
            logging.exception('Failed to log the info of this defer (%s)', func)
        try:
            return func(*args, **kwds)
        except deferred.PermanentTaskFailure:
            stop_suppressing()
            raise
        except:
            request = webapp2.get_request()
            if request:
                # After the 9th failed execution, emit a real (unsuppressed) error.
                execution_count_triggering_error_log = 9
                execution_count = request.headers.get('X-Appengine-Taskexecutioncount', None)
                if execution_count and int(execution_count) == execution_count_triggering_error_log:
                    logging.error('This deferred.run already failed %s times!', execution_count, _suppress=False)
            raise
        finally:
            if current_user:
                users.clear_user()
deferred.defer = deferred.deferred.defer = _new_deferred_defer
deferred.run = deferred.deferred.run = _new_deferred_run
# END MONKEY PATCH
# MONKEY PATCH expando unindexed properties
_orginal_expando_getattr = db.Expando.__getattribute__
def _new_expando_getattr(self, key):
    # Patched db.Expando attribute access: report dynamic properties as
    # unindexed in addition to the class-level _unindexed_properties.
    if key == '_unindexed_properties':
        return self.__class__._unindexed_properties.union(self.dynamic_properties())
    return _orginal_expando_getattr(self, key)
db.Expando.__getattribute__ = _new_expando_getattr
# END MONKEY PATCH
try:
# disable the annoying AppenginePlatformWarning's
from requests.packages import urllib3
urllib3.disable_warnings()
except ImportError:
pass
try:
import requests # @UnusedImport
try:
import requests_toolbelt.adapters.appengine
requests_toolbelt.adapters.appengine.monkeypatch()
except ImportError:
logging.error('You must include `requests-toolbelt` in requirements.txt when using the `requests` library')
except ImportError:
pass
dummy2 = lambda: None
def _Dynamic_Composite(self, request, response):
    """Implementation of ImagesService::Composite.

    Based off documentation of the PIL library at
    http://www.pythonware.com/library/pil/handbook/index.htm

    Args:
      request: ImagesCompositeRequest - Contains image request info.
      response: ImagesCompositeResponse - Contains transformed image.

    Raises:
      ApplicationError: Bad data was provided, likely data about the dimensions.
    """
    from PIL import Image
    from google.appengine.api import images
    from google.appengine.api.images import images_service_pb
    from google.appengine.api.images.images_stub import _BackendPremultiplication, _ArgbToRgbaTuple, RGBA
    from google.appengine.runtime import apiproxy_errors
    # Reject requests with missing canvas dimensions or no images/options.
    if (not request.canvas().width() or not request.canvas().height() or
        not request.image_size() or not request.options_size()):
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    # Enforce the service limits on canvas size and composite count.
    if (request.canvas().width() > 4000 or
        request.canvas().height() > 4000 or
        request.options_size() > images.MAX_COMPOSITES_PER_REQUEST):
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    width = request.canvas().width()
    height = request.canvas().height()
    color = _ArgbToRgbaTuple(request.canvas().color())
    color = _BackendPremultiplication(color)
    canvas = Image.new(RGBA, (width, height), color)
    sources = []
    for image in request.image_list():
        sources.append(self._OpenImageData(image))
    for options in request.options_list():
        if (options.anchor() < images.TOP_LEFT or
            options.anchor() > images.BOTTOM_RIGHT):
            raise apiproxy_errors.ApplicationError(
                images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
        if options.source_index() >= len(sources) or options.source_index() < 0:
            raise apiproxy_errors.ApplicationError(
                images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
        if options.opacity() < 0 or options.opacity() > 1:
            raise apiproxy_errors.ApplicationError(
                images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
        source = sources[options.source_index()]
        # Anchor enum maps to a 3x3 grid: column = anchor % 3, row = anchor / 3.
        x_anchor = (options.anchor() % 3) * 0.5
        # NOTE(review): Python 2 integer division — anchor() / 3 floors to the row.
        y_anchor = (options.anchor() / 3) * 0.5
        x_offset = int(options.x_offset() + x_anchor * (width - source.size[0]))
        y_offset = int(options.y_offset() + y_anchor * (height - source.size[1]))
        if source.mode == RGBA:
            canvas.paste(source, (x_offset, y_offset), source)
        else:
            # Fix here: alpha must be an integer (and not a float)
            alpha = int(options.opacity() * 255)
            mask = Image.new('L', source.size, alpha)
            canvas.paste(source, (x_offset, y_offset), mask)
    response_value = self._EncodeImage(canvas, request.canvas().output())
    response.mutable_image().set_content(response_value)
def new_query(self,
query,
params=None,
epoch=None,
expected_response_code=200,
database=None,
| |
type, then it must be explicitly specified in databooks and program books.' % (pop["label"], pop["type"])
for obj_type, df in zip(["comps", "characs", "pars"], [framework.comps, framework.characs, framework.pars]):
for spec_name, spec in zip(df.index, df.to_dict(orient="records")):
if spec_name in self.pops:
raise Exception('Code name "%s" has been used for both a population and a framework quantity - population names must be unique' % (spec_name))
if spec["databook page"] is not None:
if spec_name not in self.tdve:
if not np.isfinite(spec["default value"]):
raise Exception('The databook did not contain a required TDVE table named "%s" (code name "%s")' % (spec["display name"], spec_name))
else:
logger.warning('TDVE table "%s" (code name "%s") is missing from the databook. Using default values from the framework' % (spec["display name"], spec_name))
units = framework.get_databook_units(spec_name)
self.tdve[spec_name] = TimeDependentValuesEntry(spec["display name"], self.tvec.copy(), allowed_units=[units], comment=spec["guidance"])
for pop in self.pops.keys():
self.tdve[spec_name].ts[pop] = TimeSeries(assumption=spec["default value"], units=units)
tdve_page = framework.sheets["databook pages"][0][framework.sheets["databook pages"][0]["datasheet code name"] == spec["databook page"]]["datasheet title"].values[0]
if tdve_page in self.tdve_pages:
self.tdve_pages[tdve_page].append(spec_name)
else:
self.tdve_pages[tdve_page] = [spec_name]
else:
framework_units = framework.get_databook_units(spec_name) # Get the expected databook units
tdve = self.tdve[spec_name]
tdve_sheet = self.get_tdve_page(spec_name)
location = 'Error in TDVE table "%s" on sheet "%s"' % (tdve.name, tdve_sheet)
assert tdve.pop_type in self._pop_types, '%s. Population type "%s" did not match any in the framework' % (location, tdve.pop_type)
required_pops = [x for x, y in self.pops.items() if y["type"] == tdve.pop_type] # The TDVE should contain values for all populations of that type, otherwise cannot construct the ParameterSet. Check that these populations are all present
missing_pops = set(required_pops).difference(tdve.ts.keys())
if missing_pops:
raise Exception("%s. The following populations were not supplied but are required: %s" % (location, missing_pops))
for name, ts in self.tdve[spec_name].ts.items():
assert ts.has_data, "%s. Data values missing for %s (%s)" % (location, tdve.name, name)
assert ts.units is not None, "%s. Units missing for %s (%s)" % (location, tdve.name, name)
if ts.units.strip().lower() != framework_units.strip().lower():
# If the units don't match the framework's 'databook' units, see if they at least match the standard unit (for legacy databooks)
# For compartments and characteristics, the units must match exactly
if obj_type in ["comps", "characs"] or ("format" in spec and spec["format"] is not None and ts.units.lower().strip() != spec["format"].lower().strip()):
assert ts.units == framework_units, '%s. Unit "%s" for %s (%s) does not match the declared units from the Framework (expecting "%s")' % (location, ts.units, tdve.name, name, framework_units)
if obj_type == "par" and spec["timed"] == "y":
assert not ts.has_time_data, "%s. Parameter %s (%s) is marked as a timed transition in the Framework, so it must have a constant value (i.e., the databook cannot contain time-dependent values for this parameter)" % (location, tdve.name, name)
for tdc in self.interpops + self.transfers:
if tdc.from_pop_type is None: # Supply default pop type
tdc.from_pop_type = self._pop_types[0]
assert tdc.from_pop_type in self._pop_types, 'Error in transfer/interaction "%s": from population type "%s" not found in framework. If the framework defines a non-default population type, then it must be explicitly specified in databooks and program books.' % (tdc.full_name, tdc.from_pop_type)
if tdc.to_pop_type is None: # Supply default pop type
tdc.to_pop_type = self._pop_types[0]
assert tdc.to_pop_type in self._pop_types, 'Error in transfer/interaction "%s": to population type "%s" not found in framework. If the framework defines a non-default population type, then it must be explicitly specified in databooks and program books.' % (tdc.full_name, tdc.to_pop_type)
for _, spec in framework.interactions.iterrows():
for tdc in self.interpops:
if tdc.code_name == spec.name:
for (from_pop, to_pop), ts in tdc.ts.items():
assert to_pop in self.pops, 'Population "%s" in "%s" not recognized. Should be one of: %s' % (to_pop, spec.name, self.pops.keys())
assert self.pops[to_pop]["type"] == tdc.to_pop_type, 'Interaction "%s" has to-population type "%s", but contains Population "%s", which is type "%s"' % (tdc.full_name, tdc.to_pop_type, to_pop, self.pops[to_pop]["type"])
assert from_pop in self.pops, 'Population "%s" in "%s" not recognized. Should be one of: %s' % (from_pop, spec.name, self.pops.keys())
assert self.pops[from_pop]["type"] == tdc.from_pop_type, 'Interaction "%s" has from-population type "%s", but contains Population "%s", which is type "%s"' % (tdc.full_name, tdc.from_pop_type, from_pop, self.pops[from_pop]["type"])
assert ts.has_data, "Data values missing for interaction %s, %s->%s" % (spec.name, to_pop, from_pop)
assert ts.units.lower().title() == FS.DEFAULT_SYMBOL_INAPPLICABLE.lower().title(), 'Units error in interaction %s, %s->%s. Interaction units must be "N.A."' % (spec.name, to_pop, from_pop)
break
else:
raise Exception('Required interaction "%s" not found in databook' % spec.name)
for tdc in self.transfers:
for (from_pop, to_pop), ts in tdc.ts.items():
assert to_pop in self.pops, 'Population "%s" in "%s" not recognized. Should be one of: %s' % (to_pop, tdc.full.name, self.pops.keys())
assert self.pops[to_pop]["type"] == tdc.to_pop_type, 'Transfer "%s" has population type "%s", but contains Population "%s", which is type "%s"' % (tdc.full_name, tdc.to_pop_type, to_pop, self.pops[to_pop]["type"])
assert from_pop in self.pops, 'Population "%s" in "%s" not recognized. Should be one of: %s' % (from_pop, tdc.full.name, self.pops.keys())
assert self.pops[from_pop]["type"] == tdc.from_pop_type, 'Transfer "%s" has population type "%s", but contains Population "%s", which is type "%s"' % (tdc.full_name, tdc.from_pop_type, from_pop, self.pops[from_pop]["type"])
assert ts.has_data, "Data values missing for transfer %s, %s->%s" % (tdc.full_name, to_pop, from_pop)
assert ts.units is not None, "Units are missing for transfer %s, %s->%s" % (tdc.full_name, to_pop, from_pop)
return True
    def to_workbook(self) -> tuple:
        """
        Return an open workbook for the databook

        This allows the xlsxwriter workbook to be manipulated prior to closing the
        filestream e.g. to append extra sheets. This prevents issues related to cached
        data values when reloading a workbook to append or modify content

        Warning - the workbook is backed by a BytesIO instance and needs to be closed.
        See the usage of this method in the :meth`to_spreadsheet` function.

        :return: A tuple (bytes, workbook) with a BytesIO instance and a corresponding *open* xlsxwriter workbook instance
        """
        # Initialize the bytestream
        f = io.BytesIO()
        wb = xw.Workbook(f, {"in_memory": True})
        # Open a workbook
        self._book = wb
        self._book.set_properties({"category": "atomica:databook"})
        self._formats = standard_formats(self._book)
        self._references = {}  # Reset the references dict
        # Write the contents
        self._write_pops()
        self._write_tdve()
        self._write_interpops()
        self._write_transfers()
        # Clean internal variables related to writing the workbook
        self._book = None
        self._formats = None
        self._references = None
        return f, wb
def to_spreadsheet(self) -> sc.Spreadsheet:
"""
Return content as a Sciris Spreadsheet
:return: A :class:`sciris.Spreadsheet` instance
"""
f, wb = self.to_workbook()
wb.close() # Close the workbook to flush any xlsxwriter content
spreadsheet = sc.Spreadsheet(f) # Wrap it in a spreadsheet instance
return spreadsheet
def save(self, fname) -> None:
"""
Save databook to disk
This function provides a shortcut to generate a spreadsheet and immediately save it to disk.
:param fname: File name to write on disk
"""
ss = self.to_spreadsheet()
ss.save(fname)
    def add_pop(self, code_name: str, full_name: str, pop_type: str = None) -> None:
        """
        Add a population

        This will add a population to the databook. The population type should match
        one of the population types in the framework

        :param code_name: The code name for the new population (at least two characters, not a reserved keyword)
        :param full_name: The full name/label for the new population
        :param pop_type: String with the population type code name; defaults to the framework's first population type
        :raises Exception: if the code name is a reserved keyword
        """
        if pop_type is None:
            pop_type = self._pop_types[0]
        assert pop_type in self._pop_types, 'Population type "%s" not found in framework' % (pop_type)
        code_name = code_name.strip()
        assert len(code_name) > 1, 'Population code name (abbreviation) "%s" is not valid - it must be at least two characters long' % (code_name)
        assert code_name not in self.pops, 'Population with name "%s" already exists' % (code_name)
        if code_name.lower() in FS.RESERVED_KEYWORDS:
            raise Exception('Population name "%s" is a reserved keyword' % (code_name.lower()))
        self.pops[code_name] = {"label": full_name, "type": pop_type}
        # Register the new population with every transfer/interaction of matching type.
        for interaction in self.transfers + self.interpops:
            if interaction.from_pop_type == pop_type:
                interaction.from_pops.append(code_name)
            if interaction.to_pop_type == pop_type:
                interaction.to_pops.append(code_name)
        # Add an empty time series for the population to every matching TDVE table.
        for tdve in self.tdve.values():
            # Since TDVEs in databooks must have the unit set in the framework, all ts objects must share the same units
            # And, there is only supposed to be one type of unit allowed for TDVE tables (if the unit is empty, it will be 'N.A.')
            # so can just pick the first of the allowed units
            if tdve.pop_type == pop_type:
                tdve.ts[code_name] = TimeSeries(units=tdve.allowed_units[0])
def rename_pop(self, existing_code_name: str, new_code_name: str, new_full_name: str) -> None:
"""
Rename a population
:param existing_code_name: Existing code name of a population
:param new_code_name: New code name to assign
| |
<reponame>Loop3D/LoopStructural<filename>LoopStructural/visualisation/model_plotter.py
from LoopStructural.utils import getLogger
from LoopStructural.utils import LoopImportError
logger = getLogger(__name__)
import numpy as np
try:
from skimage.measure import marching_cubes
except ImportError:
logger.warning("Using depreciated version of scikit-image")
from skimage.measure import marching_cubes_lewiner as marching_cubes
from LoopStructural.modelling.features import GeologicalFeature
from LoopStructural.utils.helper import create_surface, get_vectors, create_box
class BaseModelPlotter:
def __init__(self, model = None):
"""
Parameters
----------
model
"""
self.model = model
self.default_vector_symbol = 'disk'
self.default_cmap = 'rainbow'
    @property
    def model(self):
        # The GeologicalModel being visualised (populated by the model setter).
        return self._model
    @model.setter
    def model(self, model):
        # Adopt the model's geometry so plotting uses its extent and resolution.
        # A None model leaves the plotter's current state untouched.
        if model is not None:
            self.bounding_box = np.array(model.bounding_box)
            self.nsteps = np.array(model.nsteps)
            self._model = model
            # Total number of grid cells implied by the model's step counts.
            self._nelements = self.nsteps[0]*self.nsteps[1]*self.nsteps[2]
            logger.debug("Using bounding box from model")
    @property
    def nelements(self):
        """The number of elements to use for evaluating the isosurface

        Returns
        -------
        nelements : int
            number of elements to use for isosurfacing
        """
        return self._nelements
@nelements.setter
def nelements(self, nelements : int):
"""Setter for nelements, automatically caculates the number of equally sized elements
to isosurface. Better than specifying step distance manually
Parameters
----------
nelements : int
[description]
"""
box_vol = (self.bounding_box[1, 0]-self.bounding_box[0, 0]) * (self.bounding_box[1, 1]-self.bounding_box[0, 1]) * (self.bounding_box[1, 2]-self.bounding_box[0, 2])
ele_vol = box_vol / nelements
# calculate the step vector of a regular cube
step_vector = np.zeros(3)
step_vector[:] = ele_vol ** (1. / 3.)
# step_vector /= np.array([1,1,2])
# number of steps is the length of the box / step vector
nsteps = np.ceil((self.bounding_box[1, :] - self.bounding_box[0, :]) / step_vector).astype(int)
self.nsteps = nsteps
logger.info("Using grid with dimensions {} {} {}".format(nsteps[0],nsteps[1],nsteps[2]))
    @property
    def nsteps(self):
        # Per-axis step counts of the evaluation grid (3-element array).
        return self._nsteps
    @nsteps.setter
    def nsteps(self,nsteps):
        # Always store as a numpy array so fancy indexing works downstream.
        self._nsteps = np.array(nsteps)
    def _add_surface(self,tri,vertices, name,colour='red', paint_with=None, **kwargs):
        """Virtual function to be overwritten by subclasses for adding surfaces to the viewer

        The base implementation is a no-op.

        Parameters
        ----------
        tri : numpy array
            indices of the triangles
        vertices : numy array
            vertices of the surface
        name : string
            name of the surface in the viewer
        colour : str, optional
            matplotlib colour, by default 'red'
        paint_with : GeologicalFeature, optional
            geological feature to evaluate on vertices, by default None
        """
        pass
    def _add_points(self, points, name, value= None, **kwargs):
        """Virtual function to be overwritten by subclasses for adding points to the viewer

        The base implementation is a no-op.

        Parameters
        ----------
        points : np.array
            location of the points
        name : str
            name of the points in the viewer
        value : np.array, optional
            value to assign to the points
        """
        pass
    def _add_vector_marker(self, location, vector, name, symbol_type='arrow',**kwargs):
        """Virtual function to be overwritten by subclasses for adding vectors to the viewer

        The base implementation is a no-op.

        Parameters
        ----------
        location : numpy array
            location array
        vector : numpy array
            vector component array
        symbol_type : str, optional
            type of glyph to display the vector by, by default 'arrow'
        name : string
            name of the object in the visualisation
        """
        pass
def add_section(self, geological_feature=None, axis='x', value=None, paint_with=None, **kwargs):
"""
Plot a section/map thru the model and paint with a geological feature
Parameters
----------
geological_feature : Geological feature
The feature to paint the section with
axis : string
which axis, x,y,z
value : float
Where to make the section
kwargs
additional kwargs passes to lavavu for colourmaps etc
Returns
-------
"""
if axis == 'x':
tri, yy, zz = create_surface(self.bounding_box[:, [1, 2]], self.nsteps[[1, 2]])
xx = np.zeros(zz.shape)
if value is None:
value = np.nanmean(self.bounding_box[:, 0])
xx[:] = value
if axis == 'y':
tri, xx, zz = create_surface(self.bounding_box[:, [0, 2]], self.nsteps[[0, 2]])
yy = np.zeros(xx.shape)
if value is None:
value = np.nanmean(self.bounding_box[:, 1])
yy[:] = value
if axis == 'z':
tri, xx, yy = create_surface(self.bounding_box[:, 0:2], self.nsteps[0:2])
zz = np.zeros(xx.shape)
if value is None:
value = np.nanmean(self.bounding_box[:, 2])
zz[:] = value
name = 'nothing'
if geological_feature == 'model' and self.model is not None:
name = kwargs.get('name','model_section')
if paint_with == None:
paint_with = lambda xyz: self.model.evaluate_model(xyz,scale=False)
elif geological_feature is not None:
name = kwargs.get('name', geological_feature.name)
paint_with = geological_feature
name = '{}_section_at_{}_of_{}'.format(axis,value,name)
colour = kwargs.get('colour', 'red')
# create an array to evaluate the feature on for the section
points = np.zeros((len(xx), 3)) #
points[:, 0] = xx
points[:, 1] = yy
points[:, 2] = zz
# set the surface to be painted with the geological feature, but if a painter is specified, use that instead
# if 'paint_with' not in kwargs:
# kwargs['paint_with'] = geological_feature
self._add_surface(self.model.rescale(points,inplace=False),tri , name, colour=colour, paint_with=paint_with, **kwargs)
    def add_isosurface(self,
                       geological_feature,
                       value = None,
                       isovalue=None,
                       paint_with=None,
                       slices=None,
                       colour='red',
                       nslices=None,
                       cmap=None,
                       filename=None,
                       names=None,
                       colours=None,
                       opacity=None,
                       function=None,
                       **kwargs):
        """ Plot the surface of a geological feature

        Parameters
        ----------
        geological_feature : GeologicalFeature
            feature whose scalar field is isosurfaced
        value : float, optional
            value of the scalar field to isosurface (takes precedence over isovalue)
        isovalue : float, optional
            value of the scalar field to isosurface, by default None
        paint_with : GeologicalFeature, optional
            a geological feature to paint the surface with its evaluate_value results, by default None
        slices : list, optional
            values to isosurface, by default None (takes precedence over value/isovalue)
        colour : string, optional
            matplotlib color, by default None
        nslices : int, optional
            number of slices to evenly distribute in the model, by default None
        cmap : string, optional
            matplotlib colormap, by default None
        names: list, optional
            list of names same length as slices
        colours: list, optional
            list of colours same length as slices
        opacity: double, optional
            change the opacity of the surface(s)
        function:
            called with verts, tri and surface name - e.g.
            function(verts,tri,name)

        Returns
        -------
        None
        """
        if geological_feature is None:
            logger.error("Cannot add isosurface GeologicalFeature does not exist")
        # update the feature to make sure its current
        # do isosurfacing of support using marching tetras/cubes
        x = np.linspace(self.bounding_box[0, 0], self.bounding_box[1, 0], self.nsteps[0])
        y = np.linspace(self.bounding_box[0, 1], self.bounding_box[1, 1], self.nsteps[1])
        # NOTE: z runs from the upper bound down to the lower bound; the verts
        # offset after marching_cubes below matches this orientation.
        z = np.linspace(self.bounding_box[1, 2], self.bounding_box[0, 2], self.nsteps[2])
        xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
        points = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T
        val = geological_feature.evaluate_value(points)
        mean_val = np.nanmean(val)#geological_feature.mean()
        max_val = np.nanmax(val)#geological_feature.max()
        min_val = np.nanmin(val)#geological_feature.min()
        # Pre-compute colour limits from the painting feature unless given.
        if paint_with is not None and 'vmin' not in kwargs and 'vmax' not in kwargs:
            paint_val = np.zeros(points.shape[0])
            if isinstance(paint_with, GeologicalFeature):
                paint_val = paint_with.evaluate_value(points)
            if callable(paint_with):
                paint_val = paint_with(points)
            # get the stats to check what we are plotting
            kwargs['vmin'] = np.nanmin(paint_val)#geological_feature.min()
            kwargs['vmax'] = np.nanmax(paint_val)#geological_feature.max()
        # set default parameters
        slices_ = [mean_val]
        painter = None
        voxet = None
        tris = None
        nodes = None
        # parse kwargs for parameters; precedence: nslices > slices > value > isovalue
        if isovalue is not None:
            slices_ = [isovalue]
        if value is not None:
            slices_ = [value]
        if slices is not None:
            slices_ = slices
        if nslices is not None:
            var = max_val - min_val
            # buffer slices by 5%
            slices_ = np.linspace(min_val + var * 0.05,
                                  max_val - var * 0.05,
                                  nslices)
        base_name = kwargs.pop('name',geological_feature.name)
        region = kwargs.get('region', None)
        if region is not None:
            # Mask out values outside the requested region so they are not surfaced.
            val[~region(np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T)] = np.nan
        if self.model.dtm is not None:
            # Mask out values above the digital terrain model surface.
            xyz=self.model.rescale(np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T,inplace=False)
            dtmv = self.model.dtm(xyz[:,:2])
            val[xyz[:,2]>dtmv] = np.nan
        step_vector = np.array([x[1] - x[0], y[1] - y[0], z[1] - z[0]])
        for i, isovalue in enumerate(slices_):
            logger.info("Creating isosurface of %s at %f" % (geological_feature.name, isovalue))
            if isovalue > np.nanmax(val) or isovalue < np.nanmin(val):
                logger.warning("Isovalue doesn't exist inside bounding box")
                continue
            try:
                verts, faces, normals, values = marching_cubes(
                    val.reshape(self.nsteps, order='C'),
                    isovalue,
                    spacing=step_vector)
                # Shift from grid-local coordinates back to the bounding box origin
                # (z origin is the upper bound, matching the descending z linspace).
                verts += np.array([self.bounding_box[0, 0], self.bounding_box[0, 1], self.bounding_box[1, 2]])
                self.model.rescale(verts)
            except (ValueError, RuntimeError) as e:
                print(e)
                logger.warning("Cannot isosurface {} at {}, skipping".format(geological_feature.name,isovalue))
                continue
            name = '{}_{}'.format(base_name,isovalue)
            if names is not None and len(names) == len(slices_):
                name = names[i]
            if colours is not None and len(colours) == len(slices_):
                colour = colours[i]
            if function is not None:
                # user callback with the raw surface geometry
                function(verts,faces,name)
            paint_with_value = None
            if paint_with == geological_feature:
                paint_with_value = isovalue
            self._add_surface(verts, faces, name, colour=colour, opacity=opacity, paint_with=paint_with,paint_with_value=paint_with_value,cmap=cmap,**kwargs)
def add_scalar_field(self,
geological_feature,
name=None,
cmap='rainbow',
vmin=None,
vmax = None,
opacity=None,
paint_with=None,
**kwargs):
"""Add a block the size of the model area painted with the scalar field value
Parameters
----------
geological_feature : GeologicalFeature
the geological feature to colour the scalar field by
name : string, optional
Name of the object for lavavu, needs to be unique for the viewer object, by default uses feature name
cmap : str, optional
mpl colourmap reference, by default 'rainbow'
vmin : double, optional
minimum value | |
<gh_stars>100-1000
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import json
import os
from collections import namedtuple
from tempfile import gettempdir
import click
import yaml
from ...console import CONTEXT_SETTINGS, abort, echo_debug, echo_info, set_debug
from .constants import MIB_COMPILED_URL, MIB_SOURCE_URL
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Generate an SNMP profile from a collection of MIB files')
@click.argument('mib_files', nargs=-1)
@click.option('-f', '--filters', help='Path to OIDs filter', default=None)
@click.option('-a', '--aliases', help='Path to metric tag aliases', default=None)
@click.option('--debug', '-d', help='Include debug output', is_flag=True)
@click.option('--interactive', '-i', help='Prompt to confirm before saving to a file', is_flag=True)
@click.option(
    '--source',
    '-s',
    help='Source of the MIBs files. Can be a url or a path for a directory',
    default=MIB_SOURCE_URL,
)
@click.option(
    '--compiled_mibs_path',
    '-c',
    help='Source of compiled MIBs files. Can be a url or a path for a directory',
    default=MIB_COMPILED_URL,
)
@click.pass_context
def generate_profile_from_mibs(ctx, mib_files, filters, aliases, debug, interactive, source, compiled_mibs_path):
    """
    Generate an SNMP profile from MIBs. Accepts a directory path containing mib files
    to be used as source to generate the profile, along with a filter if a device or
    family of devices support only a subset of oids from a mib.

    filters is the path to a yaml file containing a collection of MIBs, with their list of
    MIB node names to be included. For example:
    ```yaml
    RFC1213-MIB:
    - system
    - interfaces
    - ip
    CISCO-SYSLOG-MIB: []
    SNMP-FRAMEWORK-MIB:
    - snmpEngine
    ```
    Note that each `MIB:node_name` correspond to exactly one and only one OID. However, some MIBs report legacy nodes
    that are overwritten.

    To resolve, edit the MIB by removing legacy values manually before loading them with this profile generator. If a
    MIB is fully supported, it can be omitted from the filter as MIBs not found in a filter will be fully loaded.
    If a MIB is *not* fully supported, it can be listed with an empty node list, as `CISCO-SYSLOG-MIB` in the example.

    `-a, --aliases` is an option to provide the path to a YAML file containing a list of aliases to be
    used as metric tags for tables, in the following format:
    ```yaml
    aliases:
    - from:
        MIB: ENTITY-MIB
        name: entPhysicalIndex
      to:
        MIB: ENTITY-MIB
        name: entPhysicalName
    ```
    MIBs tables most of the time define a column OID within the table, or from a different table and even different MIB,
    which value can be used to index entries. This is the `INDEX` field in row nodes. As an example,
    entPhysicalContainsTable in ENTITY-MIB
    ```txt
    entPhysicalContainsEntry OBJECT-TYPE
    SYNTAX          EntPhysicalContainsEntry
    MAX-ACCESS      not-accessible
    STATUS          current
    DESCRIPTION
            "A single container/'containee' relationship."
    INDEX           { entPhysicalIndex, entPhysicalChildIndex }
    ::= { entPhysicalContainsTable 1 }
    ```
    or its json dump, where `INDEX` is replaced by indices
    ```json
    "entPhysicalContainsEntry": {
        "name": "entPhysicalContainsEntry",
        "oid": "1.3.6.1.2.1.47.1.3.3.1",
        "nodetype": "row",
        "class": "objecttype",
        "maxaccess": "not-accessible",
        "indices": [
          {
            "module": "ENTITY-MIB",
            "object": "entPhysicalIndex",
            "implied": 0
          },
          {
            "module": "ENTITY-MIB",
            "object": "entPhysicalChildIndex",
            "implied": 0
          }
        ],
        "status": "current",
        "description": "A single container/'containee' relationship."
      },
    ```
    Sometimes indexes are columns from another table, and we might want to use another column as it could have more
    human readable information - we might prefer to see the interface name vs its numerical table index. This can be
    achieved using metric_tag_aliases

    Return a list of SNMP metrics and copy its yaml dump to the clipboard
    Metric tags need to be added manually
    """
    if debug:
        set_debug()
        # mirror our --debug flag into pysmi's own logger so MIB compilation is traced too
        from pysmi import debug
        debug.setLogger(debug.Debug('all'))

    # ensure at least one mib file is provided
    if len(mib_files) == 0:
        abort('🙄 no mib file provided, need at least one mib file to generate a profile')

    # create a list of all mib files directories and mib names
    # (sets deduplicate: several mib files may live in the same directory)
    source_directories = set()
    mibs = set()
    for file in mib_files:
        source_directories.add(os.path.dirname(file))
        mibs.add(os.path.splitext(os.path.basename(file))[0])

    # create a tmp dir for compiled json mibs; reused across runs as a compile cache
    json_destination_directory = os.path.join(gettempdir(), 'mibs')

    if not os.path.exists(json_destination_directory):
        os.mkdir(json_destination_directory)

    # maps an identifier to its metric entry; dispatch below depends on the node type
    profile_oid_collection = {}
    # build profile
    for oid_node in _extract_oids_from_mibs(
        list(mibs), list(source_directories), json_destination_directory, source, compiled_mibs_path, filters
    ):
        if oid_node.node_type == 'table':
            _add_profile_table_node(profile_oid_collection, oid_node)
        elif oid_node.node_type == 'row':
            # row nodes need extra context (mib dir, aliases, compiled-mib cache)
            # to resolve their table indices into metric tags
            _add_profile_row_node(
                profile_oid_collection,
                oid_node,
                os.path.dirname(mib_files[0]),
                metric_tag_aliases_path=aliases,
                json_mib_directory=json_destination_directory,
                source=source,
                compiled_mibs_path=compiled_mibs_path,
            )
        elif oid_node.node_type == 'column':
            _add_profile_column_node(profile_oid_collection, oid_node)
        elif oid_node.node_type == 'scalar':
            _add_profile_scalar_node(profile_oid_collection, oid_node)

    echo_info('{} metrics found'.format(len(profile_oid_collection.values())))
    yaml_data = yaml.dump({'metrics': list(profile_oid_collection.values())}, sort_keys=False)
    # in interactive mode, only write the file after explicit confirmation
    if not interactive or click.confirm('Save to file?'):
        output_filename = 'metrics.yaml'
        with open(output_filename, 'w') as f:
            f.write(yaml_data)
        echo_info('Metrics saved to {}'.format(output_filename))
    echo_debug(yaml.dump({'metrics': list(profile_oid_collection.values())}, sort_keys=False))
class OidNodeInvalid(Exception):
    """Raised when a MIB JSON node lacks a required OID, name or class property."""
# One entry of a table row's INDEX clause: the MIB module and node name of the
# indexing column. (Typename 'OidIndex' differs from the Python name; kept as-is
# for backward compatibility with existing reprs/pickles.)
OidTableIndex = namedtuple('OidIndex', ['index_module', 'index_name'])

# Property names found in the JSON documents emitted by pysmi's JsonCodeGen.
JSON_NODE_PROP_CLASS = 'class'
JSON_NODE_PROP_DESCRIPTION = 'description'
JSON_NODE_PROP_INDICES = 'indices'
JSON_NODE_PROP_MAX_ACCESS = 'maxaccess'
JSON_NODE_PROP_MIB = 'mib'
JSON_NODE_PROP_NAME = 'name'
JSON_NODE_PROP_NODE_TYPE = 'nodetype'
JSON_NODE_PROP_OID = 'oid'
# Property names inside each element of the 'indices' list.
JSON_NODE_INDEX_NODE_PROP_MODULE = 'module'
JSON_NODE_INDEX_NODE_PROP_OBJECT = 'object'
class OidNode:
    """A single OID entry built from a pysmi JSON-compiled MIB node."""

    # Node types that are concrete, collectable leaves of the MIB tree.
    LEAVE_NODE_TYPES = {'table', 'row', 'column', 'scalar'}

    def __init__(self, mib, mib_json_node):
        """
        Creates an oid node from a mib and a mib json node.

        Example of mib json node:
        ```json
        {
          "name": "mIBMinorVersionNumber",
          "oid": "1.3.6.1.4.1.674.10892.1.1.2",
          "nodetype": "scalar",
          "class": "objecttype",
          "syntax": {
            "type": "DellUnsigned8BitRange",
            "class": "type"
          },
          "maxaccess": "read-only",
          "status": "mandatory",
          "description":
            "0001.0002 This attribute defines the minor version number of the Dell Enterprise Server Group MIB ."
        }
        ```

        Raises
        ------
        OidNodeInvalid
            when the json node lacks any of oid / name / class.
        """
        expected_json_node_props = [JSON_NODE_PROP_OID, JSON_NODE_PROP_NAME, JSON_NODE_PROP_CLASS]
        if not all(json_prop in mib_json_node for json_prop in expected_json_node_props):
            raise OidNodeInvalid
        self.mib_class = mib_json_node[JSON_NODE_PROP_CLASS]
        # Initialize every attribute up front so that non-object nodes (which
        # return early below) are still fully formed. Previously such nodes only
        # carried mib_class, and accessing e.g. .node_type raised AttributeError.
        self.mib = mib
        self.name = None
        self.oid = None
        self.max_access = None
        self.node_type = None
        self.description = None
        self.indices = None
        # The OBJECT-TYPE is defined by SNMP v1 and is used as a container for
        # storing information about the managed device, or some measured value on the device.
        # More details:
        # https://www.ibm.com/support/knowledgecenter/en/SSSHTQ_8.1.0/com.ibm.netcool_OMNIbus.doc_8.1.0/omnibus/wip/ua_mibmgr/reference/omn_ref_mib_mibobjects.html
        if not self.is_object:
            return
        self.name = mib_json_node[JSON_NODE_PROP_NAME]
        self.oid = mib_json_node[JSON_NODE_PROP_OID]
        self.max_access = mib_json_node.get(JSON_NODE_PROP_MAX_ACCESS)
        self.node_type = mib_json_node.get(JSON_NODE_PROP_NODE_TYPE)
        self.description = mib_json_node.get(JSON_NODE_PROP_DESCRIPTION)
        if JSON_NODE_PROP_INDICES in mib_json_node:
            # each index entry references the (module, object) of an indexing column
            self.indices = [
                OidTableIndex(
                    index_module=item[JSON_NODE_INDEX_NODE_PROP_MODULE],
                    index_name=item[JSON_NODE_INDEX_NODE_PROP_OBJECT],
                )
                for item in mib_json_node[JSON_NODE_PROP_INDICES]
            ]

    @property
    def is_middle_node(self):
        # anything that is not a table/row/column/scalar sits in the middle of the tree
        return self.node_type not in self.LEAVE_NODE_TYPES

    @property
    def is_unknown_type(self):
        return self.node_type is None

    @property
    def is_object(self):
        # only OBJECT-TYPE nodes carry collectable data
        return self.mib_class == 'objecttype'
def _compile_mib_to_json(mib, source_mib_directories, destination_directory, source, compiled_mibs_path):
    """Compile *mib* (and its dependencies) to JSON files under *destination_directory*.

    Returns the pysmi status dict mapping each processed MIB name to its result
    (e.g. 'compiled', 'borrowed', 'missing').
    """
    from pysmi.borrower import AnyFileBorrower
    from pysmi.codegen import JsonCodeGen
    from pysmi.compiler import MibCompiler
    from pysmi.parser import SmiV1CompatParser
    from pysmi.reader import getReadersFromUrls
    from pysmi.searcher import AnyFileSearcher, StubSearcher
    from pysmi.writer import FileWriter

    mib_stubs = JsonCodeGen.baseMibs
    compile_documentation = True

    # Compiler infrastructure
    code_generator = JsonCodeGen()
    file_writer = FileWriter(destination_directory).setOptions(suffix='.json')
    mib_compiler = MibCompiler(SmiV1CompatParser(tempdir=''), code_generator, file_writer)

    # use source_mib_directories as mibs source
    sources = [source]
    sources.extend(source_mib_directories)
    mib_compiler.addSources(*getReadersFromUrls(*sources, **dict(fuzzyMatching=True)))

    # already-compiled json files are found first; stubs cover the base MIBs
    searchers = [AnyFileSearcher(destination_directory).setOptions(exts=['.json']), StubSearcher(*mib_stubs)]
    mib_compiler.addSearchers(*searchers)

    # borrowers, aka compiled mibs source
    borrowers = [
        AnyFileBorrower(borrower_reader, genTexts=True).setOptions(exts=['.json'])
        for borrower_reader in getReadersFromUrls(*[compiled_mibs_path], **dict(lowcaseMatching=False))
    ]
    # BUG FIX: MibCompiler.addBorrowers takes borrowers as *args (like addSources
    # and addSearchers above); passing the list unexpanded registered a single
    # bogus borrower instead of one borrower per reader.
    mib_compiler.addBorrowers(*borrowers)

    processed = mib_compiler.compile(
        mib,
        **dict(
            noDeps=False,
            rebuild=False,
            dryRun=False,
            dstTemplate=None,
            genTexts=compile_documentation,
            # the original `False and (lambda ...) or None` always evaluated to None
            textFilter=None,
            writeMibs=True,
            ignoreErrors=False,
        )
    )
    return processed
def _get_reader_from_source(source):
    """Return a pysmi reader for *source*: a local path, or a URL otherwise."""
    from pysmi.reader.localfile import FileReader

    # a path that exists on disk is served directly; anything else is treated as a URL
    return FileReader(source) if os.path.exists(source) else _get_reader_from_url(source)
def _get_reader_from_url(url):
    """Build a pysmi HttpReader (host, port, path template) from a URL string."""
    from urllib.parse import urlparse

    from pysmi.reader.httpclient import HttpReader

    # urlparse only recognizes the host part when the string carries a scheme
    # or a leading '//', so normalize bare host strings first
    if not url.startswith(('//', 'http://', 'https://')):
        url = "//" + url
    parsed = urlparse(url)
    # NOTE(review): defaults to port 80 even for https URLs — confirm intended
    return HttpReader(parsed.hostname, parsed.port or 80, parsed.path)
def _load_json_module(source_directory, mib):
    """Return the parsed JSON document for *mib* in *source_directory*, or None if absent."""
    json_path = os.path.join(source_directory, '{}.json'.format(mib))
    try:
        with open(json_path) as mib_json:
            return json.load(mib_json)
    except FileNotFoundError:
        # not compiled yet — caller decides whether to compile
        return None
def _load_module_or_compile(mib, source_directories, json_mib_directory, source, compiled_mibs_path):
    """Return the JSON document for *mib*, compiling it from its sources first if needed."""
    echo_debug('⏳ Loading mib {}'.format(mib))
    loaded = _load_json_module(json_mib_directory, mib)
    if loaded is not None:
        echo_debug('✅ Mib {} loaded'.format(mib))
        return loaded
    # not in the json cache yet: compile, then reload
    echo_debug('⏳ Compile mib {}'.format(mib))
    processed = _compile_mib_to_json(mib, source_directories, json_mib_directory, source, compiled_mibs_path)
    echo_debug('✅ Mib {} compiled: {}'.format(mib, processed[mib]))
    if processed[mib] == 'missing':
        return None
    return _load_json_module(json_mib_directory, mib)
def _find_oid_by_name(mib, oid_name, source_directories, json_mib_directory, source, compiled_mibs_path):
    """Resolve *oid_name* to its dotted OID via the compiled JSON for *mib*; None when unknown."""
    mib_json = _load_module_or_compile(mib, source_directories, json_mib_directory, source, compiled_mibs_path)
    if mib_json is None or oid_name not in mib_json:
        return None
    return mib_json[oid_name]['oid']
def _find_name_by_oid(mib, oid, source_directories, json_mib_directory, source, compiled_mibs_path):
    """Reverse lookup: return the node name in *mib* carrying *oid*, or None."""
    mib_json = _load_module_or_compile(mib, source_directories, json_mib_directory, source, compiled_mibs_path)
    if mib_json is None:
        return None
    # first node (in document order) whose 'oid' property matches wins
    return next(
        (
            node_name
            for node_name, node in mib_json.items()
            if JSON_NODE_PROP_OID in node and node[JSON_NODE_PROP_OID] == oid
        ),
        None,
    )
def _filter_mib_oids(mib, json_mib, filter_data):
# skip filtering if no filter is provided for this mib
if filter_data is None or mib not in filter_data:
return json_mib
filtered_json_oids = {}
for filter_oid_name in filter_data[mib]:
# recursively add oids under filter_oid_name
if filter_oid_name not in json_mib:
continue
# add only oids under filter_oid
# | |
# -*- coding: utf-8 -*-
"""
Author
------
<NAME>
Email
-----
<EMAIL>
Created on
----------
- Fri Nov 25 12:53:24 2016
Modifications
-------------
- re-organized on Tue May 23 21:00:00 2017
Aims
----
- utils for apertures
"""
import numpy as np
from .background import apbackground
from .extract import (get_aperture_section, extract_profile, extract_aperture, extract_all,
make_normflat, extract_profile_simple)
from .trace import trace_naive_max
# ###################################################### #
# Here goes the code that will be used after 2017.04.27 #
# ###################################################### #
class Aperture(object):
    """ trace apertures from FLAT or SCI images

    The Aperture class defines the framework of Aperture instances.
    However, multiple choices of tracing methods are provided, such as
    1> canny edge detector (for SONG FLAT, implemented)
    2> find local maximum (for HRS, implemented but not integrated)

    Other possible methods that could be used to detect apertures:
    1> (Generalized) Hough transformation
    2> Sobel operator?
    3> generalized find local maximum (along an axis but using multiple pixels)
    4> peak local max, from skimage
    5> clustering? sounds infeasible
    6> xcorr. cross-correlation between columns
    """
    # image info (filled by get_image_info)
    imshape = None  # (n_rows, n_cols) of the image the apertures were traced on
    x = None  # 1-D pixel indices along axis 1 (length imshape[1])
    y = None  # 1-D pixel indices along axis 0 (length imshape[0])
    mx = None  # 2-D meshgrid of x
    my = None  # 2-D meshgrid of y

    # trace apertures
    nap = 0  # number of apertures found
    npix = 0  # number of pixels along each aperture trace (ap_center.shape[1])
    istraced = False  # True if traced
    method = ""  # method used to trace apertures ("canny" / "naive")
    trace_details = None  # raw data maintained during tracing (canny method only)

    # aperture edges, centers and width
    ap_lower = None  # lower edge, ap_center - ap_width
    ap_upper = None  # upper edge, ap_center + ap_width
    ap_center = None  # aperture centers, shape (nap, npix)
    ap_width = 0  # half-width of the apertures / pixel

    # fit apertures & interpolate (filled by polyfit)
    ispolyfitted = False  # True after polyfit() has run
    polydeg = 0  # degree of the Chebyshev fit

    # fitted polynomial coefs
    ap_upper_chebcoef = None
    ap_lower_chebcoef = None
    ap_center_chebcoef = None  # center is not fitted in case of canny method

    # interpolated edge & center, evaluated on every image row
    ap_upper_interp = None
    ap_lower_interp = None
    ap_center_interp = None
def __init__(self, ap_center=np.array([[]]), ap_width=15):
""" initialize with traces
Parameters
----------
ap_center:
aperture center (n_ap x n_pix)
ap_width:
aperture width
"""
ap_center = np.asarray(ap_center, float)
self.nap, self.npix = ap_center.shape
self.ap_center = ap_center
if ap_center is not None:
self.ap_lower = ap_center - ap_width
self.ap_upper = ap_center + ap_width
self.ap_width = ap_width
return
@staticmethod
def trace(flat, method="naive", ap_width=15, polydeg=4, **kwargs):
""" trace apertures for FLAT with a specified method
Example
-------
>>> from twodspec.aperture import Aperture
>>> ap = Aperture.trace(flat, method="naive", polydeg=4, sigma=7, maxdev=7, ap_width=15)
>>> ap = Aperture.trace(flat, method="canny", polydeg=4, sigma=7, maxdev=7, ap_width=15)
Parameters
----------
flat : ndarray
FLAT.
method : str, optional
{"naive", "canny", "", None}. The default is None.
ap_width : float, optional
the half width of the aperture. The default is 15.
polydeg : int, optional
The order of polynomial fitting to apertures. The default is 4.
**kwargs :
will be passed to trace method.
Returns
-------
ap : Aperture instance
The Aperture results.
"""
# assert method is valid
assert method in {None, "", "canny", "naive"}
# trace apertures
print("@Aperture: tracing apertures using [{0}] method".format(method), end="")
# return null Aperture instance
if method is None or method == "":
# initialization
ap = Aperture()
# get image info
ap.get_image_info(flat)
return ap
elif method == "canny":
# 1. canny edge detector
results, details = trace_canny_col(flat, details=True, verbose=False, sigma=kwargs["sigma"])
ap = Aperture(ap_center=results["ap_center"], ap_width=ap_width)
ap.get_image_info(flat)
ap.trace_details = details
elif method == "naive":
# 2. naive max method
ap_center = trace_naive_max(flat, sigma=kwargs["sigma"], maxdev=kwargs["maxdev"])
ap = Aperture(ap_center=ap_center, ap_width=ap_width)
ap.get_image_info(flat)
else:
# otherwise
print("\n@Aperture: invalid method {0}".format(method))
return Aperture()
# change status
ap.method = method
ap.istraced = True
# verbose
print(" >>> {0} apertures found!".format(ap.nap))
# polyfit
if polydeg is not None:
ap.polyfit(np.int(polydeg))
return ap
def get_image_info(self, image):
""" get image information """
if not isinstance(image, np.ndarray):
image = np.array(image)
self.imshape = image.shape
self.x = np.arange(self.imshape[1], dtype=int)
self.y = np.arange(self.imshape[0], dtype=int)
self.mx, self.my = np.meshgrid(self.x, self.y)
return
def polyfit(self, deg=4):
""" fit using chebyshev polynomial for adopted apertures """
# interpolated edges
self.polydeg = deg
n_row = self.imshape[0]
nap = self.nap
ap_col_interp = np.arange(0, n_row, dtype=int)
ap_upper_interp = [] # interpolated
ap_lower_interp = []
ap_center_interp = []
ap_upper_chebcoef = [] # chebcoef
ap_lower_chebcoef = []
ap_center_chebcoef = []
for i in range(nap):
# for upper
ind_fit = self.ap_center[i] > -1
this_chebcoef = np.polynomial.chebyshev.chebfit(
self.y[ind_fit], self.ap_upper[i][ind_fit], deg=deg)
ap_upper_chebcoef.append(this_chebcoef)
ap_upper_interp.append(
np.polynomial.chebyshev.chebval(ap_col_interp, this_chebcoef))
# for lower
this_chebcoef = np.polynomial.chebyshev.chebfit(
self.y[ind_fit], self.ap_lower[i][ind_fit], deg=deg)
ap_lower_chebcoef.append(this_chebcoef)
ap_lower_interp.append(
np.polynomial.chebyshev.chebval(ap_col_interp, this_chebcoef))
# for center
this_chebcoef = np.polynomial.chebyshev.chebfit(
self.y[ind_fit], self.ap_center[i][ind_fit], deg=deg)
ap_center_chebcoef.append(this_chebcoef)
ap_center_interp.append(
np.polynomial.chebyshev.chebval(ap_col_interp, this_chebcoef))
# transform to numpy.array format
self.ap_upper_interp = np.array(ap_upper_interp)
self.ap_lower_interp = np.array(ap_lower_interp)
self.ap_center_interp = np.array(ap_center_interp)
self.ap_upper_chebcoef = np.array(ap_upper_chebcoef)
self.ap_lower_chebcoef = np.array(ap_lower_chebcoef)
self.ap_center_chebcoef = np.array(ap_center_chebcoef)
# center trace: center is not fitted but averaged from edges
# self.ap_center_interp = (ap_upper_interp + ap_lower_interp) / 2.
self.ispolyfitted = True
return
def background(self, im, npix_inter=5, q=(40, 5), sigma=(10, 10), kernel_size=(11, 11)):
""" newly developed on 2017-05-28, with best performance """
return apbackground(im, self.ap_center, q=q, npix_inter=npix_inter,
sigma=sigma, kernel_size=kernel_size)
def get_aperture_section(self, im, iap):
""" get an aperture section
Parameters
----------
im : ndarray
The target image.
iap : int
The aperture index.
Returns
-------
ap_im : ndarray
DESCRIPTION.
ap_im_xx : ndarray
x coordinates.
ap_im_yy : ndarray
y coordinates.
ap_im_xx_cor : ndarray
x offset from center.
"""
return get_aperture_section(im, self.ap_center_interp[iap], ap_width=self.ap_width)
def extract_profile_simple(self, ap_im, ap_im_xx_cor, profile_oversample=10, profile_smoothness=1e-1):
return extract_profile_simple(ap_im, ap_im_xx_cor, self.ap_width,
profile_oversample=profile_oversample,
profile_smoothness=profile_smoothness)
def extract_profile(self, ap_im, ap_im_xx_cor, profile_smoothness=1e-2, n_chunks=8,
ap_width=15., profile_oversample=10., ndev=4):
return extract_profile(ap_im, ap_im_xx_cor, profile_smoothness=profile_smoothness, n_chunks=n_chunks,
ap_width=ap_width, profile_oversample=profile_oversample, ndev=ndev)
def make_normflat(self, im, max_dqe=0.04, min_snr=20, smooth_blaze=5, n_chunks=8,
profile_oversample=10, profile_smoothness=1e-2, num_sigma_clipping=20, gain=1., ron=0, n_jobs=1):
""" normalize FLAT
Parameters
----------
im : ndarray
the target image.
max_dqe : float, optional
The max deviation of Quantum Efficiency from 1.0. The default is 0.04.
min_snr : flaot, optional
Ignore the region with snr<min_snr. The default is 20.
smooth_blaze : int, optional
The smooth kernel width / pixel. The default is 5.
n_chunks : int, optional
Split each aperture to n_chunks chunks. The default is 8.
profile_oversample : int, optional
Oversampling factor of spatial profile. The default is 10.
profile_smoothness : float, optional
The smoothness of the profile. The default is 1e-2.
num_sigma_clipping : float, optional
The sigma-clipping value / sigma. The default is 20.
gain : flaot, optional
The gain of the image. The default is 1..
ron : float, optional
The readout noise. The default is 0.
n_jobs : int, optional
The number of processes launched. The default is 1.
Returns
-------
blaze, im_norm : ndarray
The blaze functions and sensitivity image.
"""
blaze, im_norm = make_normflat(
im, self, max_dqe=max_dqe, min_snr=min_snr, smooth_blaze=smooth_blaze, n_chunks=n_chunks,
profile_oversample=profile_oversample, profile_smoothness=profile_smoothness,
num_sigma_clipping=num_sigma_clipping, gain=gain, ron=ron, n_jobs=n_jobs)
return blaze, im_norm
def extract_all(self, im, n_chunks=8, profile_oversample=10, profile_smoothness=1e-2,
num_sigma_clipping=10, gain=1., ron=0, n_jobs=-1,
verbose=False, backend="multiprocessing"):
""" extract all apertures with both simple & profile extraction
Parameters
----------
im : ndarray
The target image.
n_chunks : int, optional
The number of chunks. The default is 8.
profile_oversample : int, optional
The oversampling factor of the profile. The default is 10.
profile_smoothness : float, optional
The smoothness of the profile. The default is 1e-2.
num_sigma_clipping : float, optional
The sigma clipping threshold. The default is 5..
gain : float, optional
The gain of CCD. The default is 1..
ron : flaot, optional
The readout noise of CCD. The default is 0.
n_jobs : int, optional
The number of processes launched. The default is -1.
verbose:
defaults to False
backend:
joblib backend
Returns
-------
dict
a dict sconsisting of many results.
"""
return extract_all(im, self, n_chunks=n_chunks, profile_oversample=profile_oversample,
profile_smoothness=profile_smoothness, num_sigma_clipping=num_sigma_clipping,
gain=gain, ron=ron, n_jobs=n_jobs, verbose=verbose, backend=backend)
def extract_aperture(self, iap, im, n_chunks=8, profile_oversample=10, profile_smoothness=1e-2,
num_sigma_clipping=5., gain=1., ron=0):
""" Extract one aperture
Parameters
----------
iap : int
Extract the iap th aperture.
im : ndarray
The target image.
n_chunks : int, optional
The number of chunks. The default is 8.
profile_oversample : int, optional
The oversampling factor of the profile. The default is 10.
profile_smoothness : float, optional
The smoothness of the profile. The default is 1e-2.
num_sigma_clipping : float, optional
The sigma clipping threshold. The default is 5..
gain : float, optional
The gain of CCD. The default is 1..
ron : flaot, optional
The readout noise of CCD. The default is 0.
| |
== Node.ELEMENT_NODE and nodeName_ == "directory2D":
obj_ = XSDataFile()
obj_.build(child_)
self.setDirectory2D(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "directoryMisc":
obj_ = XSDataFile()
obj_.build(child_)
self.setDirectoryMisc(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "forceReprocess":
obj_ = XSDataBoolean()
obj_.build(child_)
self.setForceReprocess(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "absoluteFidelity":
obj_ = XSDataDouble()
obj_.build(child_)
self.setAbsoluteFidelity(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "relativeFidelity":
obj_ = XSDataDouble()
obj_.build(child_)
self.setRelativeFidelity(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "rawImageSize":
obj_ = XSDataInteger()
obj_.build(child_)
self.setRawImageSize(obj_)
XSDataInput.buildChildren(self, child_, nodeName_)
# Method for marshalling an object
def marshal(self):
oStreamString = StringIO()
oStreamString.write(unicode('<?xml version="1.0" ?>\n'))
self.export(oStreamString, 0, name_="XSDataInputBioSaxsReduceFileSeriev1_0")
oStringXML = oStreamString.getvalue()
oStreamString.close()
return oStringXML
# Only to export the entire XML tree to a file stream on disk
def exportToFile(self, _outfileName):
outfile = open(_outfileName, "w")
outfile.write(unicode('<?xml version="1.0" ?>\n'))
self.export(outfile, 0, name_="XSDataInputBioSaxsReduceFileSeriev1_0")
outfile.close()
# Deprecated method, replaced by exportToFile
def outputFile(self, _outfileName):
print(
"WARNING: Method outputFile in class XSDataInputBioSaxsReduceFileSeriev1_0 is deprecated, please use instead exportToFile!"
)
self.exportToFile(_outfileName)
# Method for making a copy in a new instance
def copy(self):
return XSDataInputBioSaxsReduceFileSeriev1_0.parseString(self.marshal())
# Static method for parsing a string
def parseString(_inString):
doc = minidom.parseString(_inString)
rootNode = doc.documentElement
rootObj = XSDataInputBioSaxsReduceFileSeriev1_0()
rootObj.build(rootNode)
# Check that all minOccurs are obeyed by marshalling the created object
oStreamString = StringIO()
rootObj.export(oStreamString, 0, name_="XSDataInputBioSaxsReduceFileSeriev1_0")
oStreamString.close()
return rootObj
parseString = staticmethod(parseString)
# Static method for parsing a file
def parseFile(_inFilePath):
doc = minidom.parse(_inFilePath)
rootNode = doc.documentElement
rootObj = XSDataInputBioSaxsReduceFileSeriev1_0()
rootObj.build(rootNode)
return rootObj
parseFile = staticmethod(parseFile)
# end class XSDataInputBioSaxsReduceFileSeriev1_0
class XSDataInputBioSaxsSample(XSDataInput):
"""temporary class for multiple inhertitance emulation"""
def __init__(
self, configuration=None, code=None, comments=None, concentration=None
):
XSDataInput.__init__(self, configuration)
if concentration is None:
self._concentration = None
elif concentration.__class__.__name__ == "XSDataDouble":
self._concentration = concentration
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample constructor argument 'concentration' is not XSDataDouble but %s"
% self._concentration.__class__.__name__
)
raise BaseException(strMessage)
if comments is None:
self._comments = None
elif comments.__class__.__name__ == "XSDataString":
self._comments = comments
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample constructor argument 'comments' is not XSDataString but %s"
% self._comments.__class__.__name__
)
raise BaseException(strMessage)
if code is None:
self._code = None
elif code.__class__.__name__ == "XSDataString":
self._code = code
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample constructor argument 'code' is not XSDataString but %s"
% self._code.__class__.__name__
)
raise BaseException(strMessage)
# Methods and properties for the 'concentration' attribute
def getConcentration(self):
return self._concentration
def setConcentration(self, concentration):
if concentration is None:
self._concentration = None
elif concentration.__class__.__name__ == "XSDataDouble":
self._concentration = concentration
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample.setConcentration argument is not XSDataDouble but %s"
% concentration.__class__.__name__
)
raise BaseException(strMessage)
def delConcentration(self):
self._concentration = None
concentration = property(
getConcentration,
setConcentration,
delConcentration,
"Property for concentration",
)
# Methods and properties for the 'comments' attribute
def getComments(self):
return self._comments
def setComments(self, comments):
if comments is None:
self._comments = None
elif comments.__class__.__name__ == "XSDataString":
self._comments = comments
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample.setComments argument is not XSDataString but %s"
% comments.__class__.__name__
)
raise BaseException(strMessage)
def delComments(self):
self._comments = None
comments = property(getComments, setComments, delComments, "Property for comments")
# Methods and properties for the 'code' attribute
def getCode(self):
return self._code
def setCode(self, code):
if code is None:
self._code = None
elif code.__class__.__name__ == "XSDataString":
self._code = code
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSample.setCode argument is not XSDataString but %s"
% code.__class__.__name__
)
raise BaseException(strMessage)
def delCode(self):
self._code = None
code = property(getCode, setCode, delCode, "Property for code")
def export(self, outfile, level, name_="XSDataInputBioSaxsSample"):
showIndent(outfile, level)
outfile.write(unicode("<%s>\n" % name_))
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write(unicode("</%s>\n" % name_))
def exportChildren(self, outfile, level, name_="XSDataInputBioSaxsSample"):
XSDataInput.exportChildren(self, outfile, level, name_)
if self._concentration is not None:
self.concentration.export(outfile, level, name_="concentration")
if self._comments is not None:
self.comments.export(outfile, level, name_="comments")
if self._code is not None:
self.code.export(outfile, level, name_="code")
def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(":")[-1]
self.buildChildren(child_, nodeName_)
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "concentration":
obj_ = XSDataDouble()
obj_.build(child_)
self.setConcentration(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "comments":
obj_ = XSDataString()
obj_.build(child_)
self.setComments(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "code":
obj_ = XSDataString()
obj_.build(child_)
self.setCode(obj_)
XSDataInput.buildChildren(self, child_, nodeName_)
# Method for marshalling an object
def marshal(self):
    """Serialize this object to an XML string, including the XML declaration.

    Uses StringIO and the Python-2 `unicode` builtin, as elsewhere in this
    generated module.
    """
    oStreamString = StringIO()
    oStreamString.write(unicode('<?xml version="1.0" ?>\n'))
    self.export(oStreamString, 0, name_="XSDataInputBioSaxsSample")
    oStringXML = oStreamString.getvalue()
    oStreamString.close()
    return oStringXML
# Only to export the entire XML tree to a file stream on disk
def exportToFile(self, _outfileName):
    """Write the full XML tree (with XML declaration) to *_outfileName*.

    BUGFIX: the file handle is now closed in a finally block so it is not
    leaked when export() raises.
    """
    outfile = open(_outfileName, "w")
    try:
        outfile.write(unicode('<?xml version="1.0" ?>\n'))
        self.export(outfile, 0, name_="XSDataInputBioSaxsSample")
    finally:
        outfile.close()
# Deprecated method, replaced by exportToFile
def outputFile(self, _outfileName):
    """Deprecated alias: warns, then forwards to exportToFile()."""
    warning = "WARNING: Method outputFile in class XSDataInputBioSaxsSample is deprecated, please use instead exportToFile!"
    print(warning)
    self.exportToFile(_outfileName)
# Method for making a copy in a new instance
def copy(self):
    """Return a deep copy made via an XML marshal/parse round trip."""
    return XSDataInputBioSaxsSample.parseString(self.marshal())
# Static method for parsing a string
def parseString(_inString):
    """Build an XSDataInputBioSaxsSample from the XML text *_inString*.

    The re-export into a throwaway StringIO only sanity-checks that the
    freshly built object can be marshalled (per the minOccurs comment
    below); the generated XML itself is discarded.
    """
    doc = minidom.parseString(_inString)
    rootNode = doc.documentElement
    rootObj = XSDataInputBioSaxsSample()
    rootObj.build(rootNode)
    # Check that all minOccurs are obeyed by marshalling the created object
    oStreamString = StringIO()
    rootObj.export(oStreamString, 0, name_="XSDataInputBioSaxsSample")
    oStreamString.close()
    return rootObj
parseString = staticmethod(parseString)
# Static method for parsing a file
def parseFile(_inFilePath):
    """Build an XSDataInputBioSaxsSample from the XML file at *_inFilePath*."""
    doc = minidom.parse(_inFilePath)
    rootNode = doc.documentElement
    rootObj = XSDataInputBioSaxsSample()
    rootObj.build(rootNode)
    return rootObj
parseFile = staticmethod(parseFile)
# end class XSDataInputBioSaxsSample
class XSDataInputBioSaxsSmartMergev1_0(XSDataInput):
    """Input datamodel for the BioSaxs smart-merge plugin.

    Holds the curves to merge, optional fidelity thresholds, sample
    metadata and output file locations (generated-code style bindings).
    """
    def __init__(
        self,
        configuration=None,
        bufferCurves=None,
        runId=None,
        subtractedCurve=None,
        mergedCurve=None,
        sample=None,
        relativeFidelity=None,
        absoluteFidelity=None,
        inputCurves=None,
    ):
        XSDataInput.__init__(self, configuration)
        # Arguments are validated by class *name* (generated-code convention).
        # BUGFIX: the error messages previously read self._<attr> before the
        # attribute was assigned, so a wrongly typed argument raised
        # AttributeError instead of the intended BaseException message; they
        # now reference the offending argument directly.
        if inputCurves is None:
            self._inputCurves = []
        elif inputCurves.__class__.__name__ == "list":
            self._inputCurves = inputCurves
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'inputCurves' is not list but %s"
                % inputCurves.__class__.__name__
            )
            raise BaseException(strMessage)
        if absoluteFidelity is None:
            self._absoluteFidelity = None
        elif absoluteFidelity.__class__.__name__ == "XSDataDouble":
            self._absoluteFidelity = absoluteFidelity
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'absoluteFidelity' is not XSDataDouble but %s"
                % absoluteFidelity.__class__.__name__
            )
            raise BaseException(strMessage)
        if relativeFidelity is None:
            self._relativeFidelity = None
        elif relativeFidelity.__class__.__name__ == "XSDataDouble":
            self._relativeFidelity = relativeFidelity
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'relativeFidelity' is not XSDataDouble but %s"
                % relativeFidelity.__class__.__name__
            )
            raise BaseException(strMessage)
        if sample is None:
            self._sample = None
        elif sample.__class__.__name__ == "XSDataBioSaxsSample":
            self._sample = sample
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'sample' is not XSDataBioSaxsSample but %s"
                % sample.__class__.__name__
            )
            raise BaseException(strMessage)
        if mergedCurve is None:
            self._mergedCurve = None
        elif mergedCurve.__class__.__name__ == "XSDataFile":
            self._mergedCurve = mergedCurve
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'mergedCurve' is not XSDataFile but %s"
                % mergedCurve.__class__.__name__
            )
            raise BaseException(strMessage)
        if subtractedCurve is None:
            self._subtractedCurve = None
        elif subtractedCurve.__class__.__name__ == "XSDataFile":
            self._subtractedCurve = subtractedCurve
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'subtractedCurve' is not XSDataFile but %s"
                % subtractedCurve.__class__.__name__
            )
            raise BaseException(strMessage)
        if runId is None:
            self._runId = None
        elif runId.__class__.__name__ == "XSDataString":
            self._runId = runId
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'runId' is not XSDataString but %s"
                % runId.__class__.__name__
            )
            raise BaseException(strMessage)
        if bufferCurves is None:
            self._bufferCurves = []
        elif bufferCurves.__class__.__name__ == "list":
            self._bufferCurves = bufferCurves
        else:
            strMessage = (
                "ERROR! XSDataInputBioSaxsSmartMergev1_0 constructor argument 'bufferCurves' is not list but %s"
                % bufferCurves.__class__.__name__
            )
            raise BaseException(strMessage)
# Methods and properties for the 'inputCurves' attribute
def getInputCurves(self):
    """Getter for 'inputCurves' (list of XSDataFile)."""
    return self._inputCurves
def setInputCurves(self, inputCurves):
    """Setter: None resets to an empty list; any list is accepted as-is;
    other types raise."""
    if inputCurves is None:
        self._inputCurves = []
    elif inputCurves.__class__.__name__ == "list":
        self._inputCurves = inputCurves
    else:
        strMessage = (
            "ERROR! XSDataInputBioSaxsSmartMergev1_0.setInputCurves argument is not list but %s"
            % inputCurves.__class__.__name__
        )
        raise BaseException(strMessage)
def delInputCurves(self):
    """Deleter: note this sets the attribute to None, not an empty list."""
    self._inputCurves = None
inputCurves = property(
    getInputCurves, setInputCurves, delInputCurves, "Property for inputCurves"
)
def addInputCurves(self, value):
    """Append *value* (an XSDataFile) to inputCurves.

    None or any non-XSDataFile value raises BaseException, matching the
    validation convention of this generated module.
    """
    if value is None:
        raise BaseException(
            "ERROR! XSDataInputBioSaxsSmartMergev1_0.addInputCurves argument is None"
        )
    if value.__class__.__name__ != "XSDataFile":
        raise BaseException(
            "ERROR! XSDataInputBioSaxsSmartMergev1_0.addInputCurves argument is not XSDataFile but %s"
            % value.__class__.__name__
        )
    self._inputCurves.append(value)
def insertInputCurves(self, index, value):
    """Place *value* (an XSDataFile) at position *index* of inputCurves.

    NOTE(review): despite the name this *overwrites* the element at
    *index* rather than inserting — this matches the generated-code
    convention, so the assignment is preserved; confirm against the
    datamodel generator before changing.

    BUGFIX: the wrong-type error message said 'addInputCurves' (copy-paste);
    it now names this method.
    """
    if index is None:
        strMessage = "ERROR! XSDataInputBioSaxsSmartMergev1_0.insertInputCurves argument 'index' is None"
        raise BaseException(strMessage)
    if value is None:
        strMessage = "ERROR! XSDataInputBioSaxsSmartMergev1_0.insertInputCurves argument 'value' is None"
        raise BaseException(strMessage)
    elif value.__class__.__name__ == "XSDataFile":
        self._inputCurves[index] = value
    else:
        strMessage = (
            "ERROR! XSDataInputBioSaxsSmartMergev1_0.insertInputCurves argument is not XSDataFile but %s"
            % value.__class__.__name__
        )
        raise BaseException(strMessage)
# Methods and properties for the 'absoluteFidelity' attribute
def getAbsoluteFidelity(self):
    """Getter for 'absoluteFidelity' (XSDataDouble or None)."""
    return self._absoluteFidelity
def setAbsoluteFidelity(self, absoluteFidelity):
if absoluteFidelity is None:
self._absoluteFidelity = None
elif absoluteFidelity.__class__.__name__ == "XSDataDouble":
self._absoluteFidelity = absoluteFidelity
else:
strMessage = (
"ERROR! XSDataInputBioSaxsSmartMergev1_0.setAbsoluteFidelity argument is not XSDataDouble but %s"
% | |
# -*- coding: utf-8 -*-
"""Data_preprocess.ipynb
Automatically generated by Colaboratory.
# DATA
"""
import numpy as np
import glob
import random
import struct
import csv
from tensorflow.core.example import example_pb2
import tensorflow as tf
from threading import Thread
from queue import Queue
import time
import threading
"""## Vocabulary"""
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]'
UNKNOWN_TOKEN = '[UNK]'
START_DECODING = '[START]'
STOP_DECODING = '[STOP]'
class Vocab:
    """Two-way word/id mapping loaded from a `word count` vocab file.

    Ids 0-3 are reserved for [UNK], [PAD], [START] and [STOP]; file words
    get consecutive ids from 4 upward, in file order.
    """
    def __init__(self, vocab_file, max_size):
        """Read *vocab_file* (one `word count` pair per line).

        Stops once *max_size* ids exist (0 means unlimited). Malformed
        lines are warned about and skipped; reserved or duplicated words
        raise Exception.
        """
        self.word2id = {UNKNOWN_TOKEN: 0, PAD_TOKEN: 1, START_DECODING: 2, STOP_DECODING: 3}
        self.id2word = {0: UNKNOWN_TOKEN, 1: PAD_TOKEN, 2: START_DECODING, 3: STOP_DECODING}
        self.count = 4
        reserved = (SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING)
        with open(vocab_file, 'r') as handle:
            for line in handle:
                fields = line.split()
                if len(fields) != 2:
                    print('Warning : incorrectly formatted line in vocabulary file : %s\n' % line)
                    continue
                token = fields[0]
                if token in reserved:
                    raise Exception('<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\'t be in the vocab file, but %s is' % token)
                if token in self.word2id:
                    raise Exception('Duplicated word in vocabulary file: %s' % token)
                self.word2id[token] = self.count
                self.id2word[self.count] = token
                self.count += 1
                if max_size != 0 and self.count >= max_size:
                    print("max_size of vocab was specified as %i; we now have %i words. Stopping reading." % (max_size, self.count))
                    break
        print("Finished constructing vocabulary of %i total words. Last word added: %s" % (self.count, self.id2word[self.count - 1]))
    def word_to_id(self, word):
        """Return the id for *word*, or the [UNK] id for OOV words."""
        return self.word2id.get(word, self.word2id[UNKNOWN_TOKEN])
    def id_to_word(self, word_id):
        """Return the word for *word_id*; raise ValueError for unknown ids."""
        if word_id not in self.id2word:
            raise ValueError('Id not found in vocab: %d' % word_id)
        return self.id2word[word_id]
    def size(self):
        """Total number of ids, special tokens included."""
        return self.count
"""## Data helpers"""
def article_to_ids(article_words, vocab):
    """Map article words to ids, giving OOV words temporary extended ids.

    Returns (ids, oovs): in-vocab words keep their vocab id; each distinct
    OOV word gets id vocab.size()+k where k is its first-seen order, and
    the OOV words themselves are returned in that order.
    """
    unk_id = vocab.word_to_id(UNKNOWN_TOKEN)
    base = vocab.size()
    ids = []
    oovs = []
    for word in article_words:
        word_id = vocab.word_to_id(word)
        if word_id != unk_id:
            ids.append(word_id)
            continue
        if word not in oovs:
            oovs.append(word)
        # e.g. base+0 for the first article OOV, base+1 for the second...
        ids.append(base + oovs.index(word))
    return ids, oovs
def abstract_to_ids(abstract_words, vocab, article_oovs):
    """Map abstract words to ids, reusing the article's temporary OOV ids.

    An OOV word that appeared in the article gets vocab.size()+index into
    *article_oovs*; an OOV word absent from the article stays [UNK].
    """
    unk_id = vocab.word_to_id(UNKNOWN_TOKEN)
    result = []
    for word in abstract_words:
        word_id = vocab.word_to_id(word)
        if word_id == unk_id and word in article_oovs:
            result.append(vocab.size() + article_oovs.index(word))
        else:
            # In-vocab word, or an out-of-article OOV (stays unk_id).
            result.append(word_id)
    return result
def output_to_words(id_list, vocab, article_oovs):
    """Map output ids back to words, resolving temporary article-OOV ids.

    Ids below vocab.size() resolve through the vocab (may yield [UNK]);
    larger ids index into *article_oovs*. Raises ValueError when an id maps
    past the available article OOVs.

    BUGFIX: out-of-range list indexing raises IndexError, not ValueError,
    so the original `except ValueError` never fired and the intended
    diagnostic was never produced; we now catch IndexError.
    """
    words = []
    for i in id_list:
        try:
            w = vocab.id_to_word(i)  # might be [UNK]
        except ValueError:  # w is OOV
            assert article_oovs is not None, "Error: model produced a word ID that isn't in the vocabulary. This should not happen in baseline (no pointer-generator) mode"
            article_oov_idx = i - vocab.size()
            try:
                w = article_oovs[article_oov_idx]
            except IndexError:  # i doesn't correspond to an article oov
                raise ValueError('Error: model produced word ID %i which corresponds to article OOV %i but this example only has %i article OOVs' % (i, article_oov_idx, len(article_oovs)))
        words.append(w)
    return words
def abstract_to_sents(abstract):
    """Split abstract text from a datafile into a list of sentences.

    Args:
        abstract: string containing <s> and </s> tags marking sentence
            starts and ends.
    Returns:
        List of sentence strings with the tags removed; scanning stops at
        the first sentence whose tags are missing or unbalanced.
    """
    sents = []
    pos = 0
    while True:
        try:
            start = abstract.index(SENTENCE_START, pos)
            end = abstract.index(SENTENCE_END, start + 1)
        except ValueError:  # no more complete sentences
            return sents
        sents.append(abstract[start + len(SENTENCE_START):end])
        pos = end + len(SENTENCE_END)
def example_generator(data_path, hpm):
    """Yield tf.Example protos from the length-prefixed binary files at *data_path*.

    Files are read in sorted order when hpm['singlepass'] is set, otherwise
    shuffled; the outer loop repeats forever unless hpm['singlepass'] or
    hpm['finished'] is set. Each record is an 8-byte little 'q' length
    followed by that many serialized-proto bytes.

    BUGFIX: each datafile is now opened with a `with` block so the handle
    is closed after reading (the original leaked one handle per file).
    """
    while True:
        filelist = glob.glob(data_path)  # get the list of datafiles
        assert filelist, ('Error: Empty filelist at %s' % data_path)  # check filelist isn't empty
        if hpm['singlepass']:
            filelist = sorted(filelist)
        else:
            random.shuffle(filelist)
        for f in filelist:
            with open(f, 'rb') as reader:
                while True:
                    len_bytes = reader.read(8)
                    if not len_bytes:
                        break  # finished reading this file
                    str_len = struct.unpack('q', len_bytes)[0]
                    example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
                    yield example_pb2.Example.FromString(example_str)
        if hpm['singlepass'] or hpm['finished']:
            print("example_generator completed reading all datafiles. No more data.")
            break
"""# Batcher"""
class Example(object):
    """A single train/val/test item for text summarization.

    Holds the tokenized/truncated encoder input, decoder input and target
    sequences, plus (in pointer-generator mode) the article-OOV extended
    versions and the original strings.
    """
    def __init__(self, article, abstract_sentences, vocab, hpm):
        """Tokenize and truncate *article*/*abstract_sentences*.

        Args:
            article: source text; tokens separated by single spaces.
            abstract_sentences: list of sentence strings, tokens separated
                by single spaces.
            vocab: Vocabulary object.
            hpm: hyperparameter dict (max_enc_len, max_dec_len, pointer_gen).
        """
        self.hpm = hpm
        # Ids of the special decoding tokens.
        start_decoding = vocab.word_to_id(START_DECODING)
        stop_decoding = vocab.word_to_id(STOP_DECODING)
        # Encoder side: truncate, then map words to ids (OOVs become UNK).
        article_words = article.split()[:hpm['max_enc_len']]
        self.enc_len = len(article_words)  # length after truncation, before padding
        self.enc_input = [vocab.word_to_id(w) for w in article_words]
        # Decoder side: join sentences, tokenize, map to ids.
        abstract = ' '.join(abstract_sentences)
        abstract_words = abstract.split()
        abs_ids = [vocab.word_to_id(w) for w in abstract_words]
        self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hpm['max_dec_len'], start_decoding, stop_decoding)
        self.dec_len = len(self.dec_input)
        if hpm['pointer_gen']:
            # Extended encoder input where in-article OOVs get temporary ids,
            # plus the list of those OOV words.
            self.enc_input_extend_vocab, self.article_oovs = article_to_ids(article_words, vocab)
            # Target rebuilt with the temporary article-OOV ids.
            abs_ids_extend_vocab = abstract_to_ids(abstract_words, vocab, self.article_oovs)
            _, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hpm['max_dec_len'], start_decoding, stop_decoding)
        # Keep the original strings for logging/evaluation.
        self.original_article = article
        self.original_abstract = abstract
        self.original_abstract_sents = abstract_sentences
    def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
        """Build decoder input and target from a reference-summary id sequence.

        The input starts with *start_id*; the target ends with *stop_id*
        unless truncation to *max_len* occurred. Both returned lists have
        equal length <= max_len.
        """
        inp = [start_id] + list(sequence)
        target = list(sequence)
        if len(inp) > max_len:
            # Truncated: the target gets no stop token.
            inp = inp[:max_len]
            target = target[:max_len]
        else:
            target = target + [stop_id]
        assert len(inp) == len(target)
        return inp, target
    def pad_decoder_inp_targ(self, max_len, pad_id):
        """Pad decoder input and target in place with *pad_id* up to *max_len*."""
        self.dec_input.extend([pad_id] * (max_len - len(self.dec_input)))
        self.target.extend([pad_id] * (max_len - len(self.target)))
    def pad_encoder_input(self, max_len, pad_id):
        """Pad the encoder input (and extended copy, if any) up to *max_len*."""
        self.enc_input.extend([pad_id] * (max_len - len(self.enc_input)))
        if self.hpm['pointer_gen']:
            self.enc_input_extend_vocab.extend([pad_id] * (max_len - len(self.enc_input_extend_vocab)))
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hpm, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hpm: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word_to_id(PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hpm) # initialize the input to the encoder
self.init_decoder_seq(example_list, hpm) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hpm):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to | |
#!/usr/bin/env python
"""
Squidpeek - Per-URL Squid Logfile Metrics
<NAME> <<EMAIL>>
"""
__license__ = """
Copyright (c) 2006-2013 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__version__ = "1.5.1"
import sys
import os
import time
import urlparse
import urllib
import hashlib
import re
import socket
from UserDict import UserDict
max_url_len = 96  # displayed URLs are truncated to this many characters
# Classification flags for Squid log tags; a log tag maps to one or more
# of these (see log_tags below).
HIT = 1  # served without contacting origin server
MEMORY_HIT = 2  # HIT from memory
STALE_HIT = 3  # HIT while stale or error
MISS = 4  # some contact with origin server necessary
SERVER_VALIDATE = 5  # server tried to validate
SERVER_VALIDATE_YES = 6  # validated successfully
SERVER_FAIL = 7  # problem getting to the server
CLIENT_NOCACHE = 8  # client asked us not to
ASYNC = 9  # background fetch; not client-related
NEGATIVE_HIT = 10  # cached error
CLIENT_ERR = 11  # client-side problem # TODO: not currently used
# Map from Squid access-log result tags to the classification flags above.
log_tags = {
    'TCP_HIT': [HIT],
    'TCP_MISS': [MISS],
    'TCP_REFRESH_HIT': [MISS, SERVER_VALIDATE, SERVER_VALIDATE_YES],
    'TCP_REFRESH_FAIL_HIT': [MISS, SERVER_VALIDATE, SERVER_FAIL],
    'TCP_REF_FAIL_HIT': [MISS, SERVER_VALIDATE, SERVER_FAIL],
    'TCP_REFRESH_MISS': [MISS, SERVER_VALIDATE],
    'TCP_CLIENT_REFRESH_MISS': [MISS, CLIENT_NOCACHE],
    'TCP_CLIENT_REFRESH': [MISS, CLIENT_NOCACHE],
    'TCP_IMS_HIT': [HIT],
    'TCP_IMS_MISS': [MISS],
    'TCP_SWAPFAIL_MISS': [MISS],
    'TCP_SWAPFAIL': [MISS],
    'TCP_NEGATIVE_HIT': [HIT, NEGATIVE_HIT],
    'TCP_MEM_HIT': [HIT, MEMORY_HIT],
    'TCP_DENIED': [CLIENT_ERR],
    'TCP_OFFLINE_HIT': [HIT],
    'NONE': [MISS, SERVER_FAIL],  # ?
    'TCP_STALE_HIT': [HIT, STALE_HIT],
    'TCP_ASYNC_HIT': [ASYNC],
    'TCP_ASYNC_MISS': [ASYNC],
    'ERR_CLIENT_ABORT': [CLIENT_ERR],
    'ERR_CONNECT_FAIL': [SERVER_FAIL],
    'ERR_DNS_FAIL': [SERVER_FAIL],
    'ERR_INVAID_REQ': [CLIENT_ERR],
    'ERR_READ_TIMEOUT': [SERVER_FAIL],
    'ERR_PROXY_DENIED': [CLIENT_ERR],
    'ERR_UNKNOWN': [CLIENT_ERR],
}
# RGBA colours keyed by HTTP status class (1 -> 1xx ... 5 -> 5xx),
# used for the status-code sparkbars in the report.
status_colors = {
    1: (255,255,255,255),
    2: (32,128,32,255),
    3: (32,32,128,255),
    4: (160,160,32,255),
    5: (128,32,32,255),
}
unknown_color = (192,192,192,0)  # RGBA for unrecognized status classes
def main(fh, num_urls=100, ignore_query=True, debug=False):
from squidpeek_lib.squidlog import AccessParser as SquidAccessParser
from squidpeek_lib.sparkogram import Sparkogram
from squidpeek_lib.sparkbar import Sparkbar
log = SquidAccessParser(fh, debug=debug)
urls = {}
hot_urls = CacheDict(urls, max_size=max(2000, 10*num_urls), trim_to=.5)
first_utime = None
line = {} # if the log is empty...
for line in log:
if first_utime == None:
first_utime = line['utime']
if line['log_tag'][:3] == 'UDP': continue # ignore ICP
if line['log_tag'][:9] == 'TCP_ASYNC': continue # ignore async
if line.has_key('extra_0'): # assume that the extra field is an url-encoded list of the Link header values. Not brilliant, but...
key = parse_link(urllib.unquote(line['extra_0']))
else:
key = line['url']
if ignore_query:
scheme, authority, path, query, fragment = urlparse.urlsplit(key)
path = "/".join([seg.split(";",1)[0] for seg in path.split('/')])
key = urlparse.urlunsplit((scheme, authority, path, '', ''))
hash_key = hashUrl(key)
urls[hash_key] = urls.get(hash_key, 0) + 1
tmp = hot_urls.get(key, {
'kbytes': Sparkogram(0,256),
'elapsed': Sparkogram(0,1000),
'status': {},
'types': {},
'query': {},
})
if 200 <= line['status'] < 300:
tmp['kbytes'].append(line['bytes'] / 1024.0)
try:
tmp['status'][line['status'] / 100] += 1
except KeyError:
tmp['status'][line['status'] / 100] = 1
try:
tag_types = log_tags[line['log_tag']]
except KeyError:
if debug:
sys.stderr.write(
"Unknown log tag %s (line %s)" % (
line['log_tag'], log.num_processed
))
continue
if MISS in tag_types:
tmp['elapsed'].append(line['elapsed'])
try:
for tag_type in tag_types:
try:
tmp['types'][tag_type] += 1
except KeyError:
tmp['types'][tag_type] = 1
except KeyError:
sys.stderr.write("Warning: unrecognised log tag: %s" % line['log_tag'])
if ignore_query:
hash_url = hashUrl(line['url'])[:8]
try:
tmp['query'][hash_url] += 1
except KeyError:
tmp['query'][hash_url] = 1
hot_urls[key] = tmp
# TODO: url diversity
url_list = hot_urls.keys()
url_list.sort(lambda a, b, u=urls: cmp(u[hashUrl(b)], u[hashUrl(a)]))
print """
<html>
<head>
<style type="text/css">
body {
font-family: sans-serif;
}
th {
text-align: left;
background-color: 333;
color: white;
font-weight: normal;
padding: 1px 3px;
}
td {
text-align: right;
}
td.secondary {
background-color: #eee;
}
tr:hover td {
background-color: #ffc;
color: black;
}
table {
font-size: 75%%;
}
th a {
color: white;
text-decoration: none;
}
.key {
width: 90%%;
max-width: 800px;
}
dt {
font-weight: bold;
}
.bg0 { background-color: #fff; color: #000; }
.bg1 { background-color: #eee; color: #000; }
.bg2 { background-color: #ddd; color: #000; }
.bg3 { background-color: #ccc; color: #000; }
.bg4 { background-color: #bbb; color: #000; }
.bg5 { background-color: #aaa; color: #000; }
.bg6 { background-color: #999; color: #fff; }
.bg7 { background-color: #888; color: #fff; }
.bg8 { background-color: #777; color: #fff; }
.bg9 { background-color: #666; color: #fff; }
.bg10 { background-color: #555; color: #fff; }
</style>
<title>Squidpeek: %s log lines / %s URLs</title>
</head>
<body>
<h1>Squidpeek</h1>
<ul>
<li>%s log lines analysed, %i parsing errors</li>
<li>%i distinct URLs seen, showing top %i</li>
<li>Start: <strong>%s</strong></li>
<li>End: <strong>%s</strong></li>
</ul>
<p><em><a href="#key">Key</a></em></p>
<table>
""" % ( log.num_processed,
len(urls),
log.num_processed,
log.num_error,
len(urls),
num_urls,
time.ctime(first_utime),
time.ctime(line.get('utime', None)),
)
if ignore_query:
query_div_hdr = "<th colspan='2'>query diversity</th>"
else:
query_div_hdr = ""
header_line = """\
<tr>
<th>url</th>
<th>accesses</th>
%s
<th colspan='2'>hits</th>
<th colspan='2'>misses</th>
<th colspan='2'>miss msec</th>
<th colspan='2'>kbytes</th>
<th>status codes</th>
""" % query_div_hdr
i = 0
for url in url_list[:num_urls]:
hash_url = hashUrl(url)
if i % 25 == 0:
print header_line
i += 1
access = urls[hash_url]
types = hot_urls[url]['types']
# accesses
print "<tr><th><a href='%s'>%s</a></th><td class='secondary'>%7i</td>" % (url, url[:max_url_len], access)
# query diversity
if ignore_query:
query_set = hot_urls[url]['query'].values()
query_ttl = float(sum(query_set))
query_set.sort()
query_set.reverse()
q_div = Sparkogram(0, access) # hack, hack, hack
qn = 1
for q in query_set:
for qc in xrange(q):
q_div.append(qn)
qn += 1
img = q_div.img()
if img:
print """\
<td>%3i</td>
<td class='secondary'><img src='%s' title='most popular: %4i%% of accesses'/></td>
""" % (q_div.max_seen, img, (q_div.max_value / float(access) * 100))
else:
print "<td></td><td></td>"
# % hits
hit_pct = types.get(HIT, 0) / float(access) * 100
print "<td class='bg%s' title='%s hits'>%2.0f%%</td>" % (int(hit_pct) / 10, types.get(HIT, 0), hit_pct)
# hits
hits = Sparkbar()
stale_hit = types.get(STALE_HIT, 0)
negative_hit = types.get(NEGATIVE_HIT, 0)
memory_hit = types.get(MEMORY_HIT, 0)
disk_hit = types.get(HIT, 0) - stale_hit - negative_hit - memory_hit
if negative_hit:
hits.append(negative_hit, "negative hit", (128,32,32,255))
if disk_hit:
hits.append(disk_hit, "disk hit", (192,192,192,0))
if stale_hit:
hits.append(stale_hit, "stale hit", (160,160,32,255))
if memory_hit:
hits.append(memory_hit, "memory hit", (32,128,32,255))
print "<td class='secondary'>%s</td>" % hits.img()
# % misses
miss_pct = types.get(MISS, 0) / float(access) * 100
print "<td class='bg%s' title='%s misses'>%2.0f%%</td>" % (int(miss_pct) / 10, types.get(MISS, 0), miss_pct)
# misses
misses = Sparkbar()
no_cache = types.get(CLIENT_NOCACHE, 0)
validate_yes = types.get(SERVER_VALIDATE_YES, 0)
validate_no = types.get(SERVER_VALIDATE, 0) - validate_yes
no_validate = types.get(MISS, 0) - types.get(SERVER_VALIDATE, 0)
if no_cache:
misses.append(types.get(CLIENT_NOCACHE, 0), "client no-cache", (128,32,32,255))
if no_validate:
misses.append(no_validate, "no validator", (192,192,192,0))
if validate_no:
misses.append(validate_no, "validate unsuccessful", (160,160,32,255))
if validate_yes:
misses.append(validate_yes, "validate successful", (32,32,128,255))
print "<td class='secondary'>%s</td>" % misses.img()
# elapsed miss times
img = hot_urls[url]['elapsed'].img()
if img:
el = hot_urls[url]['elapsed']
print """\
<td>%4i</td>
<td class='secondary'><img src='%s' title='min: %2.0f msec\nmedian: %2.0f msec\nmax: %2.0f msec'/></td>""" % (
el.median, img, el.min_seen, el.median, el.max_seen)
else:
print "<td></td><td></td>"
# bytes
img = hot_urls[url]['kbytes'].img()
if img:
by = hot_urls[url]['kbytes']
print """\
<td>%3ik</td>
<td class='secondary'><img src='%s' title='min: %2.0fk\nmedian: %2.0fk\nmax: %2.0fk'/></td>""" % (
by.median, img, by.min_seen, by.median, by.max_seen)
else:
print "<td></td><td></td>"
# status codes
status_codes = Sparkbar()
[status_codes.append(hot_urls[url]['status'][s], '%sxx' % s, status_colors.get(s, unknown_color)) for s in hot_urls[url]['status']]
print "<td class='secondary'>%s</td>" % status_codes.img()
print "</tr>"
del hot_urls[url]
print """
</table>
<div class="key">
<h2 id="key">Key</h2>
<p>Each line in the results indicates the service statistics for one service URL. Most graphics can be 'moused over' to reveal more
detailed statistics for an individual entry.</p>
<h3>accesses</h3>
<p>This column shows how many acccesses that the URL received during the sample period. It does not include ICP or other
inter-cache traffic, nor does it include 'async' traffic caused by <tt>stale-while-revalidate</tt>.</p>
"""
if ignore_query:
print """
<h3>query diversity</h3>
<p>This column shows how many different query arguments were seen for this URL to the left, and a graph of how popular they were
to the right.</p>
<p>For example, if a URL <tt>http://example.com/foo</tt> has 1000 <tt>accesses</tt>, and a <tt>query diversity</tt> of 250, it means that 250
different | |
def update_planet_resources(self, planet):
    """Scrape *planet*'s current metal/crystal/deuterium amounts into
    self.RESOURCESTOSEND.

    Best-effort: parse failures are logged and swallowed. Always returns
    True.

    BUGFIX: the bare `except:` (which would also swallow KeyboardInterrupt
    and SystemExit) was narrowed to `except Exception`.
    """
    self.miniSleep()
    try:
        resp = self.br.open(self._get_url('resources', planet))
        soup = BeautifulSoup(resp)
        # The page shows thousands-separated numbers like "1.234.567".
        metal = int(soup.find(id='resources_metal').text.replace('.', ''))
        self.RESOURCESTOSEND['metal'] = metal
        crystal = int(soup.find(id='resources_crystal').text.replace('.', ''))
        self.RESOURCESTOSEND['crystal'] = crystal
        deuterium = int(soup.find(id='resources_deuterium').text.replace('.', ''))
        self.RESOURCESTOSEND['deuterium'] = deuterium
    except Exception:
        self.logger.exception('Exception while updating resources info')
    return True
def transport_resources(self):
    """Send one transport fleet per task computed by the transport manager.

    Returns False when the manager finds no destination planet; True
    otherwise, even if individual sends fail — only successful sends
    update the manager's sent-resources bookkeeping.
    """
    tasks = self.transport_manager.find_dest_planet(self.planets)
    if tasks is None:
        return False
    self.logger.info(self.transport_manager.get_summary())
    for task in iter(tasks):
        self.logger.info('Transport attempt from: %s, to: %s with resources %s' \
            % (task['from'], task['where'], task['resources']))
        result = self.send_fleet(
            task['from'],
            task['where'].coords,
            fleet=task['from'].get_fleet_for_resources(task['resources']),
            resources=task['resources'],
            mission='transport'
        )
        if result:
            # Only count resources that were actually dispatched.
            self.transport_manager.update_sent_resources(task['resources'])
            self.logger.info('Resources sent: %s, resources needed: %s' \
                % (task['resources'], self.transport_manager.get_resources_needed()))
    return True
def send_fleet(self, origin_planet, destination, fleet=None, resources=None, mission='attack', target='planet', speed=10):
    """Walk the in-game fleet dispatch forms and send *fleet* from
    *origin_planet* to *destination* ("galaxy:system:position").

    Returns True on success, False on any failure (no ships, no free fleet
    slots, missing forms, unreachable destination, ...).

    BUGFIX: *fleet* and *resources* previously used mutable `{}` defaults
    (shared across calls); they now default to None and are normalized to
    fresh dicts, preserving the old call semantics.
    """
    if fleet is None:
        fleet = {}
    if resources is None:
        resources = {}
    if origin_planet.coords == destination:
        self.logger.error('Cannot send fleet to the same planet')
        return False
    self.logger.info('Sending fleet from %s to %s (%s)' % (origin_planet, destination, mission))
    try:
        resp = self.br.open(self._get_url('fleet', origin_planet))
        try:
            self.br.select_form(name='shipsChosen')
        except mechanize.FormNotFoundError:
            self.logger.info('No available ships on the planet')
            return False
        # Fleet slot check: the page shows "used/total" after a colon.
        soup = BeautifulSoup(resp)
        span = soup.find('span', title='Slots flotta Usati/Totali')
        text = span.text.split(':')[1]
        usati = text.split('/')[0]
        disponibili = text.split('/')[1]
        if usati == disponibili:
            self.logger.info('No free slots (' + usati + '/' + disponibili + ')')
            return False
        for ship, num in fleet.iteritems():
            s = soup.find(id='button' + self.SHIPS[ship])
            num = int(num)
            try:
                # Available count is thousands-separated, e.g. "1.234".
                available = int(s.find('span', 'textlabel').nextSibling.replace('.', ''))
            except Exception:  # was a bare except; keep the best-effort fallback
                available = 0
            if available < num and mission in ('attack', 'expedition'):
                self.logger.info('No available ships to send')
                return False
            if num > 0:
                self.br.form['am' + self.SHIPS[ship]] = str(num)
        self.miniSleep()
        self.br.submit()
        try:
            self.br.select_form(name='details')
        except mechanize.FormNotFoundError:
            self.logger.info('No available ships on the planet')
            return False
        galaxy, system, position = destination.split(':')
        self.br['galaxy'] = galaxy
        self.br['system'] = system
        self.br['position'] = position
        # These controls are readonly in the page; unlock before setting.
        self.br.form.find_control("type").readonly = False
        self.br['type'] = self.TARGETS[target]
        self.br.form.find_control("speed").readonly = False
        self.br['speed'] = speed
        self.miniSleep()
        try:
            self.br.submit()
            self.br.select_form(name='sendForm')
        except Exception as e:
            self.send_telegram_message("Errore selezione pianeta " + destination + ": Verificare che esista ancora.")
            return False
        self.br.form.find_control("mission").readonly = False
        self.br.form['mission'] = self.MISSIONS[mission]
        if 'metal' in resources:
            self.br.form['metal'] = str(resources['metal'])
        if 'crystal' in resources:
            self.br.form['crystal'] = str(resources['crystal'])
        if 'deuterium' in resources:
            self.br.form['deuterium'] = str(resources['deuterium'])
        self.miniSleep()
        self.br.submit()
        self.miniSleep()
    except Exception as e:
        self.logger.exception(e)
        return False
    return True
def send_message(self, url, player, subject, message):
    """Open the in-game compose page at *url* and send *message*.

    *player* is only used for logging; the recipient is implied by *url*.
    Field names ('betreff', 'text') come from the game's message form.
    """
    self.logger.info('Sending message to %s: %s' % (player, message))
    self.br.open(url)
    self.br.select_form(nr=0)
    self.br.form['betreff'] = subject
    self.br.form['text'] = message
    self.br.submit()
def check_attacks(self):
    """Check the main page for the hostile-fleet alert.

    When an attack is present, parses the events page row by row, assembles a
    Telegram summary per newly discovered attack (single or federated), sends
    it, and clears ``self.active_attacks`` once no hostile rows remain.
    """
    resp = self.br.open(self.PAGES['main']).read()
    soup = BeautifulSoup(resp)
    alert = soup.find(id='attack_alert')
    if not alert:
        self.logger.exception('Check attack failed')
        return
    if 'noAttack' in alert.get('class', ''):
        self.logger.info('No attacks')
        self.active_attacks = []
    else:
        self.logger.info('ATTACK!')
        resp = self.br.open(self.PAGES['events'])
        soup = BeautifulSoup(resp)
        hostile = False
        attack_id = 0
        text = ''
        arrivalTime = ''
        originCoords = []
        destCoords = ''
        player = []
        # BUG FIX: detailsFleet was never initialised before its first use
        # (append/indexing below), raising NameError on the first hostile row.
        detailsFleet = []
        attackNew = False
        try:
            for tr in soup.findAll('tr'):
                countDown = tr.find('td', 'countDown')
                if countDown and 'hostile' in countDown.get('class', ''):
                    hostile = True
                    # First: check if attack was noticed
                    if tr.get('id'):
                        attack_id = tr.get('id').split('-')[1]
                    elif countDown.get('id'):
                        attack_id = countDown.get('id').split('-')[2]
                    if not attack_id or attack_id in [a.id for a in self.active_attacks]:
                        continue
                    if tr.get('class').split(' ')[0] == 'allianceAttack':
                        typeAttack = 'ATTACCO FEDERATO'
                    else:
                        typeAttack = 'ATTACCO'
                    if str(typeAttack) != str('ATTACCO FEDERATO') and tr.get('class').split(' ')[0] != 'partnerInfo':
                        attackNew = True
                        try:
                            # Attack first discovered: save attack info
                            arrivalTime = tr.find('td', 'arrivalTime').text.split(' ')[0]
                            coordsOrigin = tr.find('td', 'coordsOrigin')
                            if coordsOrigin:
                                if coordsOrigin.find('a'):
                                    originCoords.append(coordsOrigin.find('a').text.strip()[1:-1])
                            destCoords = tr.find('td', 'destCoords')
                            if destCoords:
                                destCoords = destCoords.find('a').text.strip()[1:-1]
                            detailsFleet.append(tr.find('td', 'detailsFleet').span.text.replace('.', ''))
                            player.append(tr.find('td', 'sendMail').find('a').get('title'))
                        except Exception as e:
                            self.logger.exception(e)
                    elif typeAttack == 'ATTACCO FEDERATO' or tr.get('class').split(' ')[0] == 'partnerInfo':
                        if tr.get('class').split(' ')[0] == 'partnerInfo':
                            # Extra partner row of an already-seen federated attack.
                            coordsOrigin = tr.find('td', 'coordsOrigin')
                            if coordsOrigin:
                                if coordsOrigin.find('a'):
                                    originCoords.append(coordsOrigin.find('a').text.strip())
                            player.append(tr.find('td', 'sendMail').find('a').get('title'))
                            detailsFleet.append(tr.find('td', 'detailsFleet').span.text.replace('.', ''))
                        else:
                            attackNew = True
                            arrivalTime = tr.find('td', 'arrivalTime').text.split(' ')[0]
                            destCoords = tr.find('td', 'destCoords')
                            if destCoords:
                                destCoords = destCoords.find('a').text.strip()[1:-1]
                            detailsFleet = tr.find('td', 'detailsFleet').span.text.replace('.', '')
                    if attackNew:
                        # Build one message section per attack, then reset the
                        # per-attack accumulators for the next rows.
                        text = text + '\n\n' + str(typeAttack) + ' IN CORSO\n' \
                            'Orario di arrivo: ' + str(arrivalTime) + '\n' \
                            'Coordinate di arrivo: ' + str(destCoords) + '\n'
                        for i in range(0, len(player), 1):
                            text = text + '\t\t\t\t\tGIOCATORE: ' + str(player[i]) + '\n' \
                                '\t\t\t\t\tCoordinate di partenza: ' + str(originCoords[i]) + '\n' \
                                '\t\t\t\t\tNumero navi in arrivo: ' + str(detailsFleet[i]) + '\n'
                        arrivalTime = ''
                        destCoords = ''
                        detailsFleet = []
                        player = []
                        attackNew = False
            self.send_telegram_message(text)
            if not hostile:
                self.active_attacks = []
        except Exception as e:
            self.logger.exception(e)
def send_telegram_message(self, message):
    """Deliver `message` to the configured Telegram chat; no-op when no chat id is set."""
    if self.chatIdTelegram == '':
        return
    url = 'https://api.telegram.org/' + str(self.botTelegram) + '/sendMessage?'
    payload = urlencode({'chat_id': self.chatIdTelegram, 'text': message})
    self.br.open(url, data=payload)
def collect_debris(self, p):
    """Send all of planet `p`'s recyclers to collect its own debris field."""
    if not p.has_ships():
        return
    recyclers = p.ships['rc']
    self.logger.info('Collecting debris from %s using %s recyclers' % (p, recyclers))
    self.send_fleet(p, p.coords, fleet={'rc': recyclers},
                    mission='collect', target='debris')
def send_expedition(self):
    """Launch expeditions (slot 16) from up to three randomly chosen configured planets."""
    cfg = options['expedition']
    candidates = cfg['planets'].split(' ')
    random.shuffle(candidates)
    for coords in candidates[:3]:
        origin = self.find_planet(coords=coords)
        if not origin:
            continue
        galaxy, system, position = origin.coords.split(':')
        destination = '%s:%s:16' % (galaxy, system)
        self.send_fleet(origin, destination,
                        fleet={cfg['ships_kind']: cfg['ships_number']},
                        mission='expedition')
def get_command_from_telegram_bot(self):
    """Poll the Telegram bot API for fresh commands and dispatch them.

    Only messages younger than five minutes and coming from the configured
    chat id are honoured; the Telegram update offset is persisted via
    ``options.updateValue`` so each update is processed at most once.
    """
    import json
    import time
    chatIdTelegram = options['credentials']['chat_id_telegram']
    botTelegram = options['credentials']['bot_telegram']
    lastUpdateIdTelegram = options['credentials']['last_update_id']
    url = 'https://api.telegram.org/' + str(botTelegram) + '/getUpdates?offset=' + str(int(lastUpdateIdTelegram) + 1)
    resp = self.br.open(url)
    soup = BeautifulSoup(resp)
    data_json = json.loads(str(soup))
    result = data_json['result']
    # `idx` renamed from `id`, which shadowed the builtin.
    for idx in range(0, len(result)):
        timeMessage = result[idx]['message']['date']
        chatId = result[idx]['message']['chat']['id']
        text = result[idx]['message']['text']
        update_id = result[idx]['update_id']
        currentTime = int(time.time()) - 300  # ignore messages older than 5 minutes
        if timeMessage > currentTime and chatId == int(chatIdTelegram):
            options.updateValue('credentials', 'last_update_id', str(update_id))
            if text == '/resourcesfarmed':
                self.CMD_GET_FARMED_RES = True
            elif text == '/ping':
                self.send_telegram_message("Pong")
            elif text == '/jhonny':
                # BUG FIX: randint's upper bound is inclusive, so the original
                # randint(0, len(list)) could index one element past the end.
                self.send_telegram_message(self.INSULTI_A_JONNY[randint(0, len(self.INSULTI_A_JONNY) - 1)])
            elif text == '/stop':
                self.CMD_STOP = True
            elif text == '/stop_farmer':
                self.CMD_FARM = False
                self.send_telegram_message('Farmer fermato.')
            elif text == '/start_farmer':
                self.CMD_FARM = True
                self.send_telegram_message('Farmer riattivato.')
            elif text == '/is_logged':
                self.send_telegram_message("Loggato: " + str(self.logged_in))
            elif text == '/login':
                self.CMD_LOGIN = True
            elif text == '/logout':
                self.logged_in = False
                self._prepare_browser()
            elif text.split(' ')[0] == '/raccolta':
                target = text.split(' ')[1]
                self.send_transports_production(target)
                self.logger.info('All planets send production to ' + str(target))
            elif text.split(' ')[0] == '/attack_probe':
                target = text.split(' ')[1]
                self.send_attack_of_probe(target)
                self.logger.info('Attack of probes to ' + str(target) + ' sended')
#
# Invio farmata di sonde
#
def farm(self):
    """Cycle through every configured farm list and raid its targets in turn.

    For each numbered farm list (``farms1``/``from_planet1``, ``farms2``/...)
    keep sending fleets from the configured origin until a launch fails, then
    move on to the next list; stop when no further list is configured.
    """
    farming_cfg = options['farming']
    kind = farming_cfg['ships_kind']
    count = farming_cfg['ships_number']
    speed = farming_cfg['ships_speed']
    n = 1
    farms = farming_cfg[self.bn_farms + str(n)].split(' ')
    origin_coords = farming_cfg[self.bn_from_planet + str(n)]
    while True:
        origin = self.find_planet(coords=origin_coords, is_moon=True)
        total = len(farms)
        if total != 0 and farms[0]:
            # Round-robin over the farm list, continuing from the saved counter.
            target = farms[self.farm_no[n - 1] % total]
            while self.send_fleet(origin, target, fleet={kind: count}, speed=speed):
                self.farm_no[n - 1] += 1
                target = farms[self.farm_no[n - 1] % total]
        n += 1
        try:
            farms = farming_cfg[self.bn_farms + str(n)].split(' ')
            origin_coords = farming_cfg[self.bn_from_planet + str(n)]
        except Exception:
            break
def send_transports_production(self, target):
    """Ship each planet's accumulated production to `target`, if it fills enough transports."""
    for planet in self.planets:
        self.update_planet_resources(planet)
        res = self.RESOURCESTOSEND
        numFleet = (res['metal'] + res['crystal'] + res['deuterium']) / 25000
        # Only send when at least 150 large transports' worth has piled up.
        if int(numFleet) > 150:
            self.send_fleet(planet, target, fleet={'dt': numFleet},
                            resources=res, mission='transport',
                            target='planet', speed='10')
def send_farmed_res(self):
    """Gather farmed-resource summaries from every farming origin and report via Telegram."""
    report = ''
    n = 1
    origin_coords = options['farming'][self.bn_from_planet + str(n)]
    try:
        while True:
            origin = self.find_planet(coords=origin_coords, is_moon=True)
            report += self.update_planet_resources_farmed(origin)
            n += 1
            try:
                origin_coords = options['farming'][self.bn_from_planet + str(n)]
            except:
                break
    except Exception as e:
        self.logger.exception(e)
        report = "Errore lettura risorse farmate: " + e.message.decode()
    self.send_telegram_message(report)
    self.CMD_GET_FARMED_RES = False
def sleep(self):
    """Sleep a randomized interval around the configured seed; only 60s while under attack."""
    cfg = options['general']
    seed = int(cfg['seed'])
    jitter = int(cfg['check_interval'])
    # `lower`/`upper` replace locals that shadowed the builtins min/max.
    lower = seed - randint(0, jitter)
    upper = seed + randint(0, jitter)
    sleep_time = randint(lower, upper)
    self.logger.info('Sleeping for %s secs' % sleep_time)
    if self.active_attacks:
        sleep_time = 60
    time.sleep(sleep_time)
def miniSleep(self):
    """Pause 0.4-2.5 seconds to mimic human latency between browser actions.

    BUG FIX: under Python 2 the original ``randint(400, 2500) / 1000`` used
    integer division, truncating the delay to 0, 1 or 2 whole seconds;
    dividing by ``1000.0`` restores the intended millisecond granularity.
    """
    mini_sleep_time = randint(400, 2500) / 1000.0
    time.sleep(mini_sleep_time)
def stop(self):
    """Log shutdown and remove the bot's pidfile."""
    self.logger.info('Stopping bot')
    # os.remove is an alias of os.unlink.
    os.remove(self.pidfile)
def send_attack_of_probe(self, target):
    """Send one espionage probe at `target` from the first planet or moon able to launch it."""
    for origin_group in (self.planets, self.moons):
        for origin in origin_group:
            if self.send_fleet(origin, target, fleet={'ss': '1'}, speed='10'):
                # First successful launch ends the search (same effect as the
                # old attack-flag + break bookkeeping).
                return
def load_farming_planets_info(self):
    """Refresh cached planet info for every configured farming origin."""
    n = 1
    origin_coords = options['farming'][self.bn_from_planet + str(n)]
    try:
        while True:
            origin = self.find_planet(coords=origin_coords, is_moon=True)
            self.update_planet_info(origin)
            try:
                n += 1
                origin_coords = options['farming'][self.bn_from_planet + str(n)]
            except:
                # No more numbered origins configured.
                break
    except Exception as e:
        self.logger.exception(e)
def refresh_mother(self):
    """Every fifth call, reload the mother planet's main page to keep the session fresh."""
    self.round += 1
    if self.round % 5 == 0:
        self.br.open(self._get_url('main', self.get_mother()))
        self.logger.info("Mother refreshed")
| |
#!/usr/bin/env python
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by <NAME> (https://github.com/rwightman)
"""
from timm.data import Dataset, resolve_data_config, FastCollateMixup, mixup_batch
import argparse
import time
import yaml
from datetime import datetime
from timm.models.skipnet import skip_v3
from torch.utils.tensorboard import SummaryWriter
from timm.data import lmdb_loader
import torchvision.transforms as transforms
import torchvision.datasets as datasets
has_apex = False
# from timm.data.loader import Loader
from timm.models import create_model, resume_checkpoint, convert_splitbn_model
from timm.utils import *
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
import torch
import torch.nn as nn
import torchvision.utils
# Reproducibility: fix torch's RNG seed and request deterministic cuDNN kernels.
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
# NOTE(review): benchmark=True lets cuDNN autotune kernels per input shape,
# which can undermine the determinism requested on the line above -- confirm
# this combination is intentional.
torch.backends.cudnn.benchmark = True
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                    help='YAML config file specifying default arguments')
# Main parser: any key present in the YAML config overrides these defaults
# (see _parse_args below).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
                    help='Name of model to train (default: "countception"')
parser.add_argument('--pretrained', action='store_true', default=False,
                    help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
                    help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
                    help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
                    help='number of label classes (default: 1000)')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
                    help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
                    help='Image patch size (default: None => model default)')  # TODO resize + crop
# parser.add_argument('--crop-pct', default=None, type=float,
#                     metavar='N', help='Input image center crop percent (for validation only)')
# parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
#                     help='Override mean pixel value of dataset')
# parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
#                     help='Override std deviation of of dataset')
# parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
#                     help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
                    help='input batch size for training (default: 32)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
                    help='ratio of validation batch size to training batch size (default: 1)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                    help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
                    help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
                    help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                    help='Drop block rate (default: None)')
# parser.add_argument('--jsd', action='store_true', default=False,
#                     help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
                    help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
                    help='weight decay (default: 0.0001)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "step"')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                    help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                    help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                    help='learning rate noise std-dev (default: 1.0)')
# parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
#                     help='learning rate cycle len multiplier (default: 1.0)')
# parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
#                     help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
                    help='warmup learning rate (default: 0.0001)')
# parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
#                     help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train (default: 2)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
                    help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
                    help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                    help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                    help='LR decay rate (default: 0.1)')
# Augmentation parameters
# parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
#                     help='Color jitter factor (default: 0.4)')
# parser.add_argument('--aa', type=str, default=None, metavar='NAME',
#                     help='Use AutoAugment policy. "v0" or "original". (default: None)'),
# parser.add_argument('--aug-splits', type=int, default=0,
#                     help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
# parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
#                     help='Random erase prob (default: 0.)')
# parser.add_argument('--remode', type=str, default='const',
#                     help='Random erase mode (default: "const")')
# parser.add_argument('--recount', type=int, default=1,
#                     help='Random erase count (default: 1)')
# parser.add_argument('--resplit', action='store_true', default=False,
#                     help='Do not random erase first (clean) augmentation split')
# parser.add_argument('--mixup', type=float, default=0.0,
#                     help='mixup alpha, mixup enabled if > 0. (default: 0.)')
# parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
#                     help='turn off mixup after this epoch, disabled if 0 (default: 0)')
# parser.add_argument('--smoothing', type=float, default=0.1,
#                     help='label smoothing (default: 0.1)')
# parser.add_argument('--train-interpolation', type=str, default='random',
#                     help='Training interpolation (random, bilinear, bicubic default: "random")')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
                    help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
                    help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
                    help='BatchNorm epsilon override (if not None)')
# parser.add_argument('--sync-bn', action='store_true',
#                     help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
# parser.add_argument('--dist-bn', type=str, default='',
#                     help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
# parser.add_argument('--split-bn', action='store_true',
#                     help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
                    help='Enable tracking moving average of model weights')
# parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
#                     help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
                    help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
                    help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many training processes to use (default: 1)')
parser.add_argument('--num-gpu', type=int, default=1,
                    help='Number of GPUS to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input bathes every log interval for debugging')
# parser.add_argument('--amp', action='store_true', default=False,
#                     help='use NVIDIA amp for mixed precision training')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1"')
parser.add_argument('--tta', type=int, default=0, metavar='N',
                    help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
# parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
                    help='use the multi-epochs-loader to save time at the beginning of every epoch')
def _parse_args():
    """Parse CLI arguments, letting an optional YAML config file override parser defaults.

    Returns the parsed Namespace plus a YAML dump of it (saved to the output
    dir later by the caller).
    """
    # Do we have a config file to parse?
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            # Values from the YAML file become the new defaults; explicit CLI
            # flags in `remaining` still win.
            parser.set_defaults(**yaml.safe_load(f))
    args = parser.parse_args(remaining)
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
def update_lr(args, optimizer, epoch, per_epoch_update=True, it=None, warmup_its=None):
    """Manually anneal the optimizer's learning rate.

    Linear per-iteration warmup while ``epoch < args.warmup_epochs`` (call
    with ``per_epoch_update=False`` and supply ``it``/``warmup_its``); step
    decay once per epoch afterwards (``per_epoch_update=True``).

    Returns the learning rate that was applied, or ``None`` when this call
    made no update.

    Fixes two defects of the original implementation:
      * ``lr`` could be referenced before assignment (UnboundLocalError)
        whenever neither branch ran;
      * the boundary ``epoch == args.warmup_epochs`` never received a
        post-warmup decay update (``>`` instead of ``>=``).
    """
    lr = None
    if epoch < args.warmup_epochs and not per_epoch_update:
        # Linear ramp from warmup_lr up to the base lr over warmup_its steps.
        step = (args.lr - args.warmup_lr) / warmup_its
        lr = it * step + args.warmup_lr
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    elif epoch >= args.warmup_epochs and per_epoch_update:
        # Step decay: multiply the base lr by decay_rate every decay_epochs.
        lr = args.lr * (args.decay_rate ** (epoch // args.decay_epochs))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
    return lr
def main():
setup_default_logging()
args, args_text = _parse_args()
use_amp = True
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint)
# model = skip_v3(num_classes=args.num_classes)
optimizer = create_optimizer(args, model)
if has_apex and args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
use_amp = True
resume_state = {}
resume_epoch = None
if args.resume:
resume_state, resume_epoch = resume_checkpoint(model, args.resume)
for i, param_group in enumerate(optimizer.param_groups):
param_group['lr'] = resume_state['lr']
start_epoch = resume_epoch
if 'amp' in resume_state and 'load_state_dict' in amp.__dict__:
logging.info('Restoring NVIDIA AMP state from checkpoint')
amp.load_state_dict(resume_state['amp'])
del resume_state
model = nn.DataParallel(model).cuda()
print('model to data parallel')
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='',
resume=args.resume)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
# if start_epoch < args.warmup_epochs:
# update_lr(args=args, optimizer=optimizer, epoch=start_epoch, per_epoch_update=False) #TODO iterations correction
# else:
# update_lr(args=args, optimizer=optimizer, epoch=start_epoch, per_epoch_update=True)
num_epochs = args.epochs
| |
# src/ctransformer.py
"""
Performs C code transformations using pycparser. All the transformations are applied in-place on the AST.
Currently supported code transformations:
- Function call creation
Creates a statement representing a function call.
- Havoc block creation
Creates a block where every variable is assigned a non-deterministic value.
- Insertion into compound statements
Inserts any node into a compound statement at a user-specified position.
"""
import copy
import sys
import random
import string
from pycparser import c_ast
from pycparserext.ext_c_parser import GnuCParser
from pycparserext.ext_c_generator import GnuCGenerator
from pycparserext.ext_c_parser import FuncDeclExt, TypeDeclExt
VERIFIER_NONDET_FUNCTION_NAME = "__VERIFIER_nondet_"
VERIFIER_ERROR_FUNCTION_NAME = "__VERIFIER_error"
ASSERT_FUNCTION_NAME = "assert"
########################################################################################################################
# Exceptions #
########################################################################################################################
class NonSvCompTypeException(Exception):
    """Raised when a variable's type admits no valid SV-COMP non-deterministic assignment.

    The offending variable is kept on the instance so callers can report it.
    """

    def __init__(self, variable: str):
        self.variable = variable

    def __str__(self):
        return "Can not assign variable " + str(self.variable) + " a non-deterministic value."
########################################################################################################################
# AST Node Visitors #
########################################################################################################################
class CompoundInserter(c_ast.NodeVisitor):
    """
    Walks the AST and splices ``new_node`` into the file body or a compound
    block, directly before ``before_node`` or directly after ``after_node``.
    """

    def __init__(self, new_node: c_ast.Node, before_node: c_ast.Node, after_node: c_ast.Node):
        self.new_node = new_node
        # New node goes immediately before this neighbour (if given) ...
        self.before_node = before_node
        # ... or immediately after this one.
        self.after_node = after_node

    def generic_visit(self, node):
        # Function/type declaration internals are not traversed.
        if type(node) != FuncDeclExt and type(node) != TypeDeclExt:
            for child in node:
                self.visit(child)

    def _splice(self, node, container):
        # Insert into `container` (the list backing node's children) relative
        # to the matching neighbour, recursing into every child either way.
        for position, (_, child) in enumerate(node.children()):
            if self.before_node and child == self.before_node:
                container.insert(position, self.new_node)
            elif self.after_node and child == self.after_node:
                container.insert(position + 1, self.new_node)
            self.visit(child)

    def visit_FileAST(self, node):
        self._splice(node, node.ext)

    def visit_Compound(self, node):
        self._splice(node, node.block_items)
class AggregateDeanonymizer(c_ast.NodeVisitor):
    """
    Gives every anonymous struct/union a freshly generated, unused type name.
    """

    def __init__(self, unavailable_identifiers: set):
        # Names already taken in the translation unit; generated names avoid these.
        self.unavailable_identifiers = unavailable_identifiers

    def generic_visit(self, node):
        # Function/type declaration internals are not traversed.
        if type(node) != FuncDeclExt and type(node) != TypeDeclExt:
            for child in node:
                self.visit(child)

    def visit_Struct(self, node):
        if node.name is None:
            node.name = self.__generate_new_typename()

    def visit_Union(self, node):
        if node.name is None:
            node.name = self.__generate_new_typename()

    def __generate_new_typename(self):
        # Try random names of growing length (starting at 5 letters) until an
        # unused one is found.
        length = 5
        while True:
            candidate = "".join(random.choices(string.ascii_letters, k=length))
            if candidate not in self.unavailable_identifiers:
                return candidate
            length += 1
class IdentifierCollector(c_ast.NodeVisitor):
    """Collects the tag names of all named structs/unions found in type declarations."""

    def __init__(self):
        self.identifiers = set()

    def generic_visit(self, node):
        # Function/type declaration internals are not traversed.
        if type(node) != FuncDeclExt and type(node) != TypeDeclExt:
            for child in node:
                self.visit(child)

    def visit_TypeDecl(self, node):
        inner = node.type
        if type(inner) in (c_ast.Struct, c_ast.Union) and inner.name is not None:
            self.identifiers.add(inner.name)
class PropertyReplacer(c_ast.NodeVisitor):
    """
    Replaces the verification property in the AST with the given one.

    Looks for calls to the verifier error function (whose property lives in
    the surrounding ``if`` condition) and for ``assert`` calls (whose property
    is the argument list). Properties are ExprLists whose elements are read as
    a conjunction.
    """

    def __init__(self, new_property: c_ast.ExprList):
        self.new_property = new_property
        # Ancestor stack maintained while walking, so visit_FuncCall can see
        # the enclosing Compound/If.
        self.parents = []

    def generic_visit(self, node):
        self.parents.append(node)
        for _, child in node.children():
            self.visit(child)
        self.parents.pop()

    def visit_FuncCall(self, node):
        callee = node.name.name
        if callee == VERIFIER_ERROR_FUNCTION_NAME:
            # __VERIFIER_error() is reached inside `if (<property>) { ... }`:
            # swap out the condition of that if.
            if type(self.parents[-1]) == c_ast.Compound and type(self.parents[-2]) == c_ast.If:
                self.parents[-2].cond = self.new_property
        elif callee == ASSERT_FUNCTION_NAME:
            # assert(<property>): replace the argument list.
            node.args = self.new_property
class DeclarationReplacer(c_ast.NodeVisitor):
    """
    Rewrites the initializer of the matching declaration to a constant integer.
    """

    def __init__(self, declaration: c_ast.Decl, new_value: int):
        self.declaration = declaration
        self.new_value = new_value

    def visit_Decl(self, node):
        # Declarations are matched by declared name only.
        if node.name != self.declaration.name:
            return
        node.init = c_ast.Constant("int", str(self.new_value))
class SliceFuncCallUpdater(c_ast.NodeVisitor):
    """
    Searches for any function calls, and appends "_slice_1" if they do not end
    with this suffix already. This needs to be done for Frama-C slicing, as it
    changes the function names.
    Note: This may not work on every possible slicing outcome, but should suffice for most of the cases.
    """

    def visit_FuncCall(self, node):
        # BUG FIX: the original compared the single character name[-8] against
        # the whole string "_slice_1" (always unequal), so every call --
        # including already-suffixed ones -- was renamed again. endswith()
        # checks the complete suffix and also handles short names correctly.
        if type(node.name) is c_ast.ID and not node.name.name.endswith("_slice_1"):
            node.name.name = node.name.name + "_slice_1"
########################################################################################################################
# Transformer #
########################################################################################################################
class CTransformer:
def __init__(self, ast: c_ast.FileAST):
    """Wrap the translation-unit AST; all transformations mutate it in place."""
    self.ast = ast
def deanonymize_aggregates(self):
    """
    Ensures every struct/union carries a unique type name.

    Example: the anonymous aggregate in "typedef struct { int x; } s;"
    becomes "typedef struct _s { int x; } s;", so that it can afterwards be
    referred to by its tag and is no longer anonymous.
    """
    # Collect names already in use, then hand them to the deanonymizer so the
    # freshly generated tags cannot collide.
    taken = IdentifierCollector()
    taken.visit(self.ast)
    AggregateDeanonymizer(taken.identifiers).visit(self.ast)
def create_havoc_block(self, declarations: list):
    """
    Builds one compound statement that assigns non-deterministic values to all
    variables declared in `declarations`. Types are respected (e.g. an int
    gets __VERIFIER_nondet_int()); structs and arrays are unrolled member-wise
    by create_havoc_assignment.
    :param declarations: A list of c_ast.Decl.
    :return: First entry: a set of strings with the SV-comp nondet function
             names used (e.g. "__VERIFIER_nondet_int"). Second entry: a
             c_ast.Compound bundling all havoc assignments.
    :raise: NonSvCompTypeException in case a variable was given that can not be havoced.
    :rtype: set of str, c_ast.Compound
    """
    used_nondet_functions = set()
    assignments = []
    for decl in declarations:
        nondet_funcs, havoc_block = self.create_havoc_assignment(decl)
        assignments.append(havoc_block)
        used_nondet_functions |= nondet_funcs
    return used_nondet_functions, c_ast.Compound(assignments)
    def create_havoc_assignment(self, declaration: c_ast.Decl, parent: c_ast.Node=None):
        """
        Creates a havoc assignment block for the variable declared in the given declaration.
        :param declaration: The declaration of the variable to havoc.
        :param parent: A parent node for aggregates to allow for access of the children. Either c_ast.StructRef,
            c_ast.ArrayRef or c_ast.UnaryOp with op="*". None for the outermost (non-nested) call.
        :return: First entry: A set of strings containing the employed non-deterministic assignment SV comp function
            names, e.g. "__VERIFIER_nondet_int". Second entry: A block containing all havoc assignments for that
            variable.
        :rtype: set of str, c_ast.Compound
        """
        # Here be dragons. Most likely contains some bugs.
        # TODO Should be tested thoroughly.
        body_items = []
        svcomp_havoc_functions = set()
        # First, registers itself into the parent struct, if there is one. The recursive calls below create
        # StructRef nodes with field=None; the child fills in its own name here on the way down.
        if type(parent) == c_ast.StructRef and parent.field is None:
            parent.field = c_ast.ID(declaration.name)
        # Checks for five main cases: We have a basic identifier, a struct, a union, an array or a pointer.
        # If a compound type is encountered, this function is called recursively on the child declaration(s).
        if type(declaration.type) == c_ast.TypeDecl:
            # CASE STRUCT
            if type(declaration.type.type) == c_ast.Struct:
                # Iterates over every struct member and creates a havoc block for this. Useful for nested structs.
                # NOTE: decls may be None for a struct declared elsewhere; such structs are silently skipped here.
                if declaration.type.type.decls:
                    for member in declaration.type.type.decls:
                        if parent is None:
                            new_parent = c_ast.StructRef(c_ast.ID(declaration.name), ".", None)
                        else:
                            new_parent = c_ast.StructRef(parent, ".", None)
                        rec_svcomp_havoc_funcs, rec_havoc_block = self.create_havoc_assignment(member, new_parent)
                        body_items.append(rec_havoc_block)
                        svcomp_havoc_functions = svcomp_havoc_functions.union(rec_svcomp_havoc_funcs)
            # CASE UNION
            elif type(declaration.type.type) == c_ast.Union and len(declaration.type.type.decls) > 0:
                # For a union, we just havoc the very first member.
                if parent is None:
                    new_parent = c_ast.StructRef(c_ast.ID(declaration.name), ".", None)
                else:
                    new_parent = c_ast.StructRef(parent, ".", None)
                rec_svcomp_havoc_funcs, rec_havoc_block = self.create_havoc_assignment(declaration.type.type.decls[0],
                                                                                       new_parent)
                body_items.append(rec_havoc_block)
                svcomp_havoc_functions = svcomp_havoc_functions.union(rec_svcomp_havoc_funcs)
            # CASE BASIC IDENTIFIER
            elif type(declaration.type.type) == c_ast.IdentifierType:
                # Base case of the recursion: emit "<lvalue> = __VERIFIER_nondet_<type>();".
                havoc_function = VERIFIER_NONDET_FUNCTION_NAME + self.get_svcomp_type(declaration.type.type.names)
                rvalue = self.create_function_call(havoc_function)
                if parent is None:
                    lvalue = c_ast.ID(declaration.name)
                else:
                    lvalue = parent
                havoc_variable = c_ast.Assignment("=", lvalue, rvalue)
                body_items.append(havoc_variable)
                svcomp_havoc_functions.add(havoc_function)
        # CASE ARRAY
        elif type(declaration.type) == c_ast.ArrayDecl:
            # Strips one ArrayDecl layer off a copy of the declaration, so the recursive call sees the element type.
            modified_declaration = copy.deepcopy(declaration)
            modified_declaration.type = modified_declaration.type.type
            if type(declaration.type.dim) == c_ast.Constant and declaration.type.dim.type == "int":
                # Iterates over every member of the array (Thus, the size has to be constant).
                for i in range(int(declaration.type.dim.value)):
                    if parent is None:
                        new_parent = c_ast.ID(declaration.name)
                    else:
                        new_parent = parent
                    rec_svcomp_havoc_funcs, rec_havoc_block = self.create_havoc_assignment(
                        modified_declaration,
                        c_ast.ArrayRef(new_parent, c_ast.Constant("int", str(i)))
                    )
                    body_items.append(rec_havoc_block)
                    svcomp_havoc_functions = svcomp_havoc_functions.union(rec_svcomp_havoc_funcs)
            else:
                # Non-constant dimension: the element count is unknown at transformation time, so the array is
                # left unhavoced (body_items stays empty and the warning at the end of this function fires).
                sys.stderr.write("WARNING: Non-constant array encountered!")  # TODO
        # CASE POINTER
        elif type(declaration.type) == c_ast.PtrDecl:
            if type(declaration.type.type) == c_ast.TypeDecl and \
                    type(declaration.type.type.type) == c_ast.IdentifierType and \
                    ("const" not in declaration.type.quals or "void" in declaration.type.type.type.names):
                # Base case of the recursion. Only entered if we can not dereference the pointer due to either an
                # unknown type (void pointer) or a constant memory location behind the pointer.
                havoc_function = VERIFIER_NONDET_FUNCTION_NAME + "pointer"
                svcomp_havoc_functions.add(havoc_function)
                rvalue = self.create_function_call(havoc_function)
                if parent is None:
                    lvalue = c_ast.ID(declaration.name)
                else:
                    lvalue = parent
                havoc_variable = c_ast.Assignment("=", lvalue, rvalue)
                body_items.append(havoc_variable)
            else:
                # We can dereference the pointer: Does so and creates a havoc statement for the type behind the pointer.
                modified_declaration = copy.deepcopy(declaration)
                modified_declaration.type = modified_declaration.type.type
                if parent is None:
                    new_parent = c_ast.ID(declaration.name)
                else:
                    new_parent = parent
                rec_svcomp_havoc_funcs, rec_havoc_block = self.create_havoc_assignment(modified_declaration,
                                                                                       c_ast.UnaryOp("*", new_parent))
                body_items.append(rec_havoc_block)
                svcomp_havoc_functions = svcomp_havoc_functions.union(rec_svcomp_havoc_funcs)
        # Bundles the havoc assignments into one compound statement.
        if len(body_items) == 0:
            sys.stderr.write("WARNING: Could not havoc variable of declaration " + GnuCGenerator().visit(declaration) +
                             "\n")
        return svcomp_havoc_functions, c_ast.Compound(body_items)
def get_svcomp_type(self, type_names: list):
"""
Searches for the corresponding SV comp type for the given list of C types.
:param type_names: A list of strings containing the C types, e.g. ["unsigned", "int"].
:raise: NonSvCompTypeException in case the SV comp type could not be identified.
:return: A string representing the SV comp type, e.g. "uint".
"""
svcomp_type = None
type_names = set(type_names)
# Implements the translation from C types to SV comp types.
if "bool" in type_names:
svcomp_type = "bool"
elif "float" in type_names:
svcomp_type = "float"
elif "double" in type_names:
svcomp_type = "double"
elif "loff_t" in type_names:
svcomp_type = "loff_t"
elif "pchar" in type_names:
svcomp_type = "pchar"
elif "pthread_t" in type_names:
svcomp_type = "pthread_t"
elif "sector_t" in type_names:
svcomp_type = "sector_t"
elif "size_t" in type_names:
svcomp_type = "size_t"
elif "u32" in type_names:
svcomp_type = "u32"
elif "char" in type_names:
svcomp_type = "char"
elif "short" in type_names:
svcomp_type = "short"
elif "long" in type_names:
svcomp_type = "long"
elif "int" in type_names:
svcomp_type = "int"
if "unsigned" in type_names:
svcomp_type = "u" + svcomp_type
if not svcomp_type:
print(type_names)
raise NonSvCompTypeException(" ".join(type_names))
return svcomp_type
def get_c_type(self, svcomp_type: str):
"""
Searches for the corresponding list of C types for the given SV comp type.
:param type_names: A string representing the SV comp type, e.g. "uint".
:raise: NonSvTypeException in case the SV comp type could not be identified.
:return: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.