input
stringlengths
2.65k
237k
output
stringclasses
1 value
import json
import struct


def _read_uint(d, n):
    """Pop an *n*-byte big-endian unsigned int off *d*; return (value, rest)."""
    return int.from_bytes(d[:n], "big"), d[n:]


class ClassFile:
    """Codec for the Java .class file format.

    ``serialize`` decodes raw class-file bytes into a JSON-friendly dict;
    ``deserialize`` re-encodes such a dict back into bytes.  (The method
    names are the reverse of the usual convention but are kept for
    backward compatibility with existing callers.)
    """

    @staticmethod
    def serialize(d):
        """Decode class-file bytes *d* into a dict.

        The 4-byte magic (0xCAFEBABE) is consumed but not validated.
        Returns the dict only (any trailing bytes are ignored).
        """
        r = {}
        d = d[4:]  # skip magic
        minor, d = _read_uint(d, 2)
        major, d = _read_uint(d, 2)
        r["version"] = {"major": major, "minor": minor}
        r["pool"], d = ConstPool.serialize(d)
        r["access_flags"], d = _read_uint(d, 2)
        r["this_class"], d = _read_uint(d, 2)
        r["super_class"], d = _read_uint(d, 2)
        ic, d = _read_uint(d, 2)
        r["interfaces"] = []
        for _ in range(ic):
            n, d = _read_uint(d, 2)
            r["interfaces"].append(n)
        fc, d = _read_uint(d, 2)
        r["fields"] = []
        for _ in range(fc):
            f, d = Field.serialize(d, r["pool"])
            r["fields"].append(f)
        mc, d = _read_uint(d, 2)
        r["methods"] = []
        for _ in range(mc):
            m, d = Method.serialize(d, r["pool"])
            r["methods"].append(m)
        ac, d = _read_uint(d, 2)
        r["attributes"] = []
        for _ in range(ac):
            a, d = Attribute.serialize(d, r["pool"])
            r["attributes"].append(a)
        return r

    @staticmethod
    def deserialize(d):
        """Encode dict *d* (as produced by ``serialize``) back into bytes."""
        r = bytes.fromhex("CAFEBABE")
        r += d["version"]["minor"].to_bytes(2, "big")
        r += d["version"]["major"].to_bytes(2, "big")
        r += ConstPool.deserialize(d["pool"])
        r += d["access_flags"].to_bytes(2, "big")
        r += d["this_class"].to_bytes(2, "big")
        r += d["super_class"].to_bytes(2, "big")
        r += len(d["interfaces"]).to_bytes(2, "big")
        for i in d["interfaces"]:
            r += i.to_bytes(2, "big")
        r += len(d["fields"]).to_bytes(2, "big")
        for f in d["fields"]:
            r += Field.deserialize(f)
        r += len(d["methods"]).to_bytes(2, "big")
        for m in d["methods"]:
            r += Method.deserialize(m)
        r += len(d["attributes"]).to_bytes(2, "big")
        for a in d["attributes"]:
            r += Attribute.deserialize(a)
        return r


class ConstPool:
    """Constant-pool section codec (JVM spec section 4.4)."""

    # tags 9/10/11 all carry a (class_index, name_and_type_index) pair
    _REF_TYPES = {9: "field", 10: "method", 11: "interface_method"}

    @staticmethod
    def serialize(d):
        """Parse the constant pool from *d*; return (entries, remaining bytes).

        Pool slots are 1-based; long/double entries occupy two slots, so
        the loop index is advanced twice for them.
        """
        r = []
        count, d = _read_uint(d, 2)
        i = 1
        while i < count:
            t, d = _read_uint(d, 1)
            if t == 1:  # CONSTANT_Utf8
                sl, d = _read_uint(d, 2)
                s = d[:sl].decode("utf8")
                d = d[sl:]
                r.append({"index": i, "type": "utf8", "data": s})
            elif t == 3:  # CONSTANT_Integer
                n = struct.unpack(">i", d[:4])[0]
                d = d[4:]
                r.append({"index": i, "type": "int", "data": n})
            elif t == 4:  # CONSTANT_Float
                f = struct.unpack(">f", d[:4])[0]
                d = d[4:]
                r.append({"index": i, "type": "float", "data": f})
            elif t == 5:  # CONSTANT_Long -- takes two pool slots
                n = struct.unpack(">q", d[:8])[0]
                d = d[8:]
                r.append({"index": i, "type": "long", "data": n})
                i += 1
            elif t == 6:  # CONSTANT_Double -- takes two pool slots
                n = struct.unpack(">d", d[:8])[0]
                d = d[8:]
                r.append({"index": i, "type": "double", "data": n})
                i += 1
            elif t == 7:  # CONSTANT_Class
                n, d = _read_uint(d, 2)
                r.append({"index": i, "type": "class", "data": n})
            elif t == 8:  # CONSTANT_String
                n, d = _read_uint(d, 2)
                r.append({"index": i, "type": "string", "data": n})
            elif t in (9, 10, 11):  # Fieldref / Methodref / InterfaceMethodref
                c, d = _read_uint(d, 2)
                nt, d = _read_uint(d, 2)
                r.append({"index": i, "type": ConstPool._REF_TYPES[t],
                          "data": {"class": c, "name_type": nt}})
            elif t == 12:  # CONSTANT_NameAndType
                n, d = _read_uint(d, 2)
                nt, d = _read_uint(d, 2)
                r.append({"index": i, "type": "name_type",
                          "data": {"name": n, "type": nt}})
            elif t == 15:  # CONSTANT_MethodHandle
                rk, d = _read_uint(d, 1)
                ri, d = _read_uint(d, 2)
                r.append({"index": i, "type": "method_handle",
                          "data": {"reference_kind": rk,
                                   "reference_index": ri}})
            elif t == 16:  # CONSTANT_MethodType
                di, d = _read_uint(d, 2)
                r.append({"index": i, "type": "method_type", "data": di})
            elif t == 18:  # CONSTANT_InvokeDynamic
                bi, d = _read_uint(d, 2)
                nt, d = _read_uint(d, 2)
                r.append({"index": i, "type": "invoke_dynamic",
                          "data": {"bootstrap_method_attr_index": bi,
                                   "name_type": nt}})
            i += 1
        return r, d

    @staticmethod
    def deserialize(d):
        """Encode constant-pool entries *d* back into bytes.

        Bug fixes vs. the previous version: the count is derived from the
        maximum index (not the list's last element), entries are looked up
        via a dict instead of an O(n^2) rescan, and a trailing long/double
        entry bumps the count by 2 because it occupies two slots
        (JVM spec 4.4.5: constant_pool_count = slots + 1).
        """
        by_index = {e["index"]: e for e in d}
        m = max(by_index)
        if by_index[m]["type"] in ("long", "double"):
            count = m + 2
        else:
            count = m + 1
        r = count.to_bytes(2, "big")
        for i in range(1, m + 1):
            e = by_index.get(i)
            if e is None:
                continue  # hidden second slot of a long/double entry
            t = e["type"]
            if t == "utf8":
                s = e["data"].encode("utf8")
                r += b"\x01" + len(s).to_bytes(2, "big") + s
            elif t == "int":
                r += b"\x03" + struct.pack(">i", e["data"])
            elif t == "float":
                r += b"\x04" + struct.pack(">f", e["data"])
            elif t == "long":
                r += b"\x05" + struct.pack(">q", e["data"])
            elif t == "double":
                r += b"\x06" + struct.pack(">d", e["data"])
            elif t == "class":
                r += b"\x07" + e["data"].to_bytes(2, "big")
            elif t == "string":
                r += b"\x08" + e["data"].to_bytes(2, "big")
            elif t == "field":
                r += (b"\x09" + e["data"]["class"].to_bytes(2, "big")
                      + e["data"]["name_type"].to_bytes(2, "big"))
            elif t == "method":
                r += (b"\x0A" + e["data"]["class"].to_bytes(2, "big")
                      + e["data"]["name_type"].to_bytes(2, "big"))
            elif t == "interface_method":
                r += (b"\x0B" + e["data"]["class"].to_bytes(2, "big")
                      + e["data"]["name_type"].to_bytes(2, "big"))
            elif t == "name_type":
                r += (b"\x0C" + e["data"]["name"].to_bytes(2, "big")
                      + e["data"]["type"].to_bytes(2, "big"))
            elif t == "method_handle":
                r += (b"\x0F" + e["data"]["reference_kind"].to_bytes(1, "big")
                      + e["data"]["reference_index"].to_bytes(2, "big"))
            elif t == "method_type":
                r += b"\x10" + e["data"].to_bytes(2, "big")
            elif t == "invoke_dynamic":
                r += (b"\x12"
                      + e["data"]["bootstrap_method_attr_index"].to_bytes(2, "big")
                      + e["data"]["name_type"].to_bytes(2, "big"))
        return r


class Field:
    """field_info codec: access flags, name/descriptor indices, attributes."""

    @staticmethod
    def serialize(d, cpool):
        """Parse one field_info from *d*; return (dict, remaining bytes)."""
        r = {}
        r["access_flags"], d = _read_uint(d, 2)
        r["name"], d = _read_uint(d, 2)
        r["type"], d = _read_uint(d, 2)
        ac, d = _read_uint(d, 2)
        r["attributes"] = []
        for _ in range(ac):
            a, d = Attribute.serialize(d, cpool)
            r["attributes"].append(a)
        return r, d

    @staticmethod
    def deserialize(d):
        """Encode a field dict back into field_info bytes."""
        r = d["access_flags"].to_bytes(2, "big")
        r += d["name"].to_bytes(2, "big")
        r += d["type"].to_bytes(2, "big")
        r += len(d["attributes"]).to_bytes(2, "big")
        for a in d["attributes"]:
            r += Attribute.deserialize(a)
        return r


class Method:
    """method_info codec: identical layout to field_info (JVM spec 4.6)."""

    @staticmethod
    def serialize(d, cpool):
        """Parse one method_info from *d*; return (dict, remaining bytes)."""
        r = {}
        r["access_flags"], d = _read_uint(d, 2)
        r["name"], d = _read_uint(d, 2)
        r["type"], d = _read_uint(d, 2)
        ac, d = _read_uint(d, 2)
        r["attributes"] = []
        for _ in range(ac):
            a, d = Attribute.serialize(d, cpool)
            r["attributes"].append(a)
        return r, d

    @staticmethod
    def deserialize(d):
        """Encode a method dict back into method_info bytes."""
        r = d["access_flags"].to_bytes(2, "big")
        r += d["name"].to_bytes(2, "big")
        r += d["type"].to_bytes(2, "big")
        r += len(d["attributes"]).to_bytes(2, "big")
        for a in d["attributes"]:
            r += Attribute.deserialize(a)
        return r

# NOTE(review): the Attribute class (ConstantValue / Code / Exceptions /
# InnerClasses codec) is truncated in the available source view and is not
# reproduced here; ClassFile, Field and Method reference it and require it
# to be defined elsewhere in the module.
import numpy as np
from pyannote.audio.keras_utils import load_model
from pyannote.audio.signal import Binarize, Peak
from pyannote.audio.features import Precomputed
import my_cluster
from pyannote.core import Annotation
from pyannote.audio.embedding.utils import l2_normalize
from pyannote.database import get_annotated


class SpeakerDiarizationPre(object):
    """Speaker diarization with affinity propagation."""

    def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
                 sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
                 scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
                 emb__internal=False,
                 cls__damping=0.8, cls__preference=-20, cls__metric='cosine'):
        super(SpeakerDiarizationPre, self).__init__()
        self.feature_extraction = feature_extraction
        # speech activity detection hyper-parameters
        self.sad__onset = sad__onset
        self.sad__offset = sad__offset
        self.sad__dimension = sad__dimension
        # speaker change detection hyper-parameters
        self.scd__alpha = scd__alpha
        self.scd__min_duration = scd__min_duration
        self.scd__dimension = scd__dimension
        # embedding hyper-parameters
        self.emb__internal = emb__internal
        # clustering hyper-parameters
        self.cls__damping = cls__damping
        self.cls__preference = cls__preference
        self.cls__metric = cls__metric
        # kept for parity with the original (value unused)
        step = self.feature_extraction.sliding_window().step
        # sub-modules: SAD, SCD, embedding, clustering
        self.sad_ = Precomputed(sad__pre)
        self.sad_binarize_ = Binarize(onset=self.sad__onset,
                                      offset=self.sad__offset)
        self.scd_ = Precomputed(scd__pre)
        self.scd_peak_ = Peak(alpha=self.scd__alpha,
                              min_duration=self.scd__min_duration,
                              percentile=False)
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
                                            damping=self.cls__damping,
                                            preference=self.cls__preference)

    def __call__(self, current_file, annotated=False):
        # SAD scores -> binary speech regions
        sad_scores = self.sad_(current_file)
        speech_regions = self.sad_binarize_.apply(
            sad_scores, dimension=self.sad__dimension)
        # SCD scores -> change points
        scd_scores = self.scd_(current_file)
        change_points = self.scd_peak_.apply(
            scd_scores, dimension=self.scd__dimension)
        # intersect change points with speech regions to get turns
        turns = change_points.crop(speech_regions)
        if annotated:
            turns = turns.crop(get_annotated(current_file))
        # drop turns too short to contain any embedding frame
        emb = self.emb_(current_file)
        turns = [t for t in turns if len(emb.crop(t, mode='loose')) > 0]
        # one summed embedding per turn
        stacked = [np.sum(emb.crop(t, mode='loose'), axis=0) for t in turns]
        if len(stacked) < 1:
            return None
        fX = l2_normalize(np.vstack(stacked))
        # cluster and build the output annotation
        labels = self.cls_.apply(fX)
        hypothesis = Annotation(uri=current_file['uri'])
        for t, label in zip(turns, labels):
            hypothesis[t] = label
        return hypothesis


class SpeakerDiarizationOracleSegAP(object):
    """Speaker diarization with oracle segmentation and affinity propagation."""

    def __init__(self, feature_extraction, emb__pre, emb__internal=False,
                 cls__damping=0.8, cls__preference=-20, cls__metric='cosine'):
        super(SpeakerDiarizationOracleSegAP, self).__init__()
        self.feature_extraction = feature_extraction
        # embedding hyper-parameters
        self.emb__internal = emb__internal
        # clustering hyper-parameters
        self.cls__damping = cls__damping
        self.cls__preference = cls__preference
        self.cls__metric = cls__metric
        # kept for parity with the original (value unused)
        step = self.feature_extraction.sliding_window().step
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
                                            damping=self.cls__damping,
                                            preference=self.cls__preference)

    def __call__(self, current_file, annotated=False):
        # oracle segmentation: take turns straight from the reference
        turns = current_file['annotation'].get_timeline()
        if annotated:
            turns = turns.crop(get_annotated(current_file))
        emb = self.emb_(current_file)
        turns = [t for t in turns if len(emb.crop(t, mode='loose')) > 0]
        stacked = [np.sum(emb.crop(t, mode='loose'), axis=0) for t in turns]
        if len(stacked) < 1:
            return None
        fX = l2_normalize(np.vstack(stacked))
        labels = self.cls_.apply(fX)
        hypothesis = Annotation(uri=current_file['uri'])
        for t, label in zip(turns, labels):
            hypothesis[t] = label
        return hypothesis


class SpeakerDiarizationHACPre(object):
    """Speaker diarization with hierarchical agglomerative clustering."""

    def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
                 sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
                 scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
                 emb__internal=False,
                 cls__method='average', cls__threshold=5, cls__metric='cosine'):
        super(SpeakerDiarizationHACPre, self).__init__()
        self.feature_extraction = feature_extraction
        # speech activity detection hyper-parameters
        self.sad__onset = sad__onset
        self.sad__offset = sad__offset
        self.sad__dimension = sad__dimension
        # speaker change detection hyper-parameters
        self.scd__alpha = scd__alpha
        self.scd__min_duration = scd__min_duration
        self.scd__dimension = scd__dimension
        # embedding hyper-parameters
        self.emb__internal = emb__internal
        # clustering hyper-parameters
        self.cls__method = cls__method
        self.cls__threshold = cls__threshold
        self.cls__metric = cls__metric
        # kept for parity with the original (value unused)
        step = self.feature_extraction.sliding_window().step
        self.sad_ = Precomputed(sad__pre)
        self.sad_binarize_ = Binarize(onset=self.sad__onset,
                                      offset=self.sad__offset)
        self.scd_ = Precomputed(scd__pre)
        self.scd_peak_ = Peak(alpha=self.scd__alpha,
                              min_duration=self.scd__min_duration,
                              percentile=False)
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
                                             method=self.cls__method,
                                             threshold=self.cls__threshold)

    def __call__(self, current_file, annotated=False):
        sad_scores = self.sad_(current_file)
        speech_regions = self.sad_binarize_.apply(
            sad_scores, dimension=self.sad__dimension)
        scd_scores = self.scd_(current_file)
        change_points = self.scd_peak_.apply(
            scd_scores, dimension=self.scd__dimension)
        turns = change_points.crop(speech_regions)
        if annotated:
            turns = turns.crop(get_annotated(current_file))
        emb = self.emb_(current_file)
        turns = [t for t in turns if len(emb.crop(t, mode='loose')) > 0]
        stacked = [np.sum(emb.crop(t, mode='loose'), axis=0) for t in turns]
        if len(stacked) < 1:
            return None
        fX = l2_normalize(np.vstack(stacked))
        labels = self.cls_.apply(fX)
        hypothesis = Annotation(uri=current_file['uri'])
        for t, label in zip(turns, labels):
            hypothesis[t] = label
        return hypothesis


class SpeakerDiarizationPreStages(object):
    """Same pipeline as SpeakerDiarizationPre, but also returns the
    intermediate SAD and SCD stage outputs."""

    def __init__(self, feature_extraction, sad__pre, scd__pre, emb__pre,
                 sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
                 scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
                 emb__internal=False,
                 cls__damping=0.8, cls__preference=-20, cls__metric='cosine'):
        super(SpeakerDiarizationPreStages, self).__init__()
        self.feature_extraction = feature_extraction
        # speech activity detection hyper-parameters
        self.sad__onset = sad__onset
        self.sad__offset = sad__offset
        self.sad__dimension = sad__dimension
        # speaker change detection hyper-parameters
        self.scd__alpha = scd__alpha
        self.scd__min_duration = scd__min_duration
        self.scd__dimension = scd__dimension
        # embedding hyper-parameters
        self.emb__internal = emb__internal
        # clustering hyper-parameters
        self.cls__damping = cls__damping
        self.cls__preference = cls__preference
        self.cls__metric = cls__metric
        # kept for parity with the original (value unused)
        step = self.feature_extraction.sliding_window().step
        self.sad_ = Precomputed(sad__pre)
        self.sad_binarize_ = Binarize(onset=self.sad__onset,
                                      offset=self.sad__offset)
        self.scd_ = Precomputed(scd__pre)
        self.scd_peak_ = Peak(alpha=self.scd__alpha,
                              min_duration=self.scd__min_duration,
                              percentile=False)
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
                                            damping=self.cls__damping,
                                            preference=self.cls__preference)

    def __call__(self, current_file, annotated=False):
        sad_scores = self.sad_(current_file)
        speech_regions = self.sad_binarize_.apply(
            sad_scores, dimension=self.sad__dimension)
        sad_output = speech_regions.to_annotation()
        scd_scores = self.scd_(current_file)
        change_points = self.scd_peak_.apply(
            scd_scores, dimension=self.scd__dimension)
        turns = change_points.crop(speech_regions)
        scd_output = turns.to_annotation()
        if annotated:
            turns = turns.crop(get_annotated(current_file))
        emb = self.emb_(current_file)
        turns = [t for t in turns if len(emb.crop(t, mode='loose')) > 0]
        stacked = [np.sum(emb.crop(t, mode='loose'), axis=0) for t in turns]
        if len(stacked) < 1:
            return None
        fX = l2_normalize(np.vstack(stacked))
        labels = self.cls_.apply(fX)
        hypothesis = Annotation(uri=current_file['uri'])
        for t, label in zip(turns, labels):
            hypothesis[t] = label
        return hypothesis, sad_output, scd_output


class SpeakerDiarizationWeighted(object):
    """Like SpeakerDiarizationPre, but averages embeddings weighted by
    precomputed (1 - weight) scores instead of summing them."""

    def __init__(self, feature_extraction, sad__pre, scd__pre, weight__pre,
                 emb__pre,
                 sad__onset=0.7, sad__offset=0.7, sad__dimension=1,
                 scd__alpha=0.5, scd__min_duration=1., scd__dimension=1,
                 emb__internal=False,
                 cls__damping=0.8, cls__preference=-20, cls__metric='cosine'):
        super(SpeakerDiarizationWeighted, self).__init__()
        self.feature_extraction = feature_extraction
        # speech activity detection hyper-parameters
        self.sad__onset = sad__onset
        self.sad__offset = sad__offset
        self.sad__dimension = sad__dimension
        # speaker change detection hyper-parameters
        self.scd__alpha = scd__alpha
        self.scd__min_duration = scd__min_duration
        self.scd__dimension = scd__dimension
        # embedding hyper-parameters
        self.emb__internal = emb__internal
        # clustering hyper-parameters
        self.cls__damping = cls__damping
        self.cls__preference = cls__preference
        self.cls__metric = cls__metric
        # kept for parity with the original (value unused)
        step = self.feature_extraction.sliding_window().step
        self.sad_ = Precomputed(sad__pre)
        self.sad_binarize_ = Binarize(onset=self.sad__onset,
                                      offset=self.sad__offset)
        self.scd_ = Precomputed(scd__pre)
        self.scd_peak_ = Peak(alpha=self.scd__alpha,
                              min_duration=self.scd__min_duration,
                              percentile=False)
        # per-frame weights
        self.weight_ = Precomputed(weight__pre)
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringAP(metric=self.cls__metric,
                                            damping=self.cls__damping,
                                            preference=self.cls__preference)

    def __call__(self, current_file, annotated=False):
        sad_scores = self.sad_(current_file)
        speech_regions = self.sad_binarize_.apply(
            sad_scores, dimension=self.sad__dimension)
        scd_scores = self.scd_(current_file)
        change_points = self.scd_peak_.apply(
            scd_scores, dimension=self.scd__dimension)
        turns = change_points.crop(speech_regions)
        if annotated:
            turns = turns.crop(get_annotated(current_file))
        emb = self.emb_(current_file)
        turns = [t for t in turns if len(emb.crop(t, mode='loose')) > 0]
        weight = self.weight_(current_file)
        # weighted mean: frames with high weight scores contribute less
        stacked = [
            np.mean(emb.crop(t, mode='loose')
                    * (1 - weight.crop(t, mode='loose')), axis=0)
            for t in turns]
        if len(stacked) < 1:
            return None
        fX = l2_normalize(np.vstack(stacked))
        labels = self.cls_.apply(fX)
        hypothesis = Annotation(uri=current_file['uri'])
        for t, label in zip(turns, labels):
            hypothesis[t] = label
        return hypothesis


class SpeakerDiarizationOnSceneHAC(object):
    """Per-scene HAC diarization over a precomputed speech timeline."""

    def __init__(self, emb__pre, cls__method='average', cls__threshold=5,
                 cls__metric='cosine'):
        super(SpeakerDiarizationOnSceneHAC, self).__init__()
        # clustering hyper-parameters
        self.cls__method = cls__method
        self.cls__threshold = cls__threshold
        self.cls__metric = cls__metric
        self.emb_ = Precomputed(emb__pre)
        self.cls_ = my_cluster.ClusteringHAC(metric=self.cls__metric,
                                             method=self.cls__method,
                                             threshold=self.cls__threshold)

    def __call__(self, current_file):
        hypothesis = Annotation(uri=current_file['uri'])
        sentences = current_file['speech_timeline']
        scenes = current_file['scenes']
        emb = self.emb_(current_file)
        # cluster each scene independently
        for scene in scenes:
            turns = sentences.crop(scene)
            if len(turns) == 0:
                continue
            if len(turns) == 1:
                # a lone turn gets label 1 without clustering
                hypothesis[turns[0]] = 1
                continue
            stacked = [np.sum(emb.crop(t, mode='loose'), axis=0)
                       for t in turns]
            fX = l2_normalize(np.vstack(stacked))
            labels = self.cls_.apply(fX)
            for t, label in zip(turns, labels):
                hypothesis[t] = label
        return hypothesis

# NOTE(review): class SpeakerDiarizationOnEnrollHAC is truncated in the
# available source view (its __call__ is cut off) and is not reproduced here.
# source tag from the original dump: nanjekyejoannah/pypy
""" Libffi wrapping """
from __future__ import with_statement

from rpython.rtyper.tool import rffi_platform
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.tool import rffi_platform
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.rarithmetic import intmask, is_emulated_long
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.rmmap import alloc
from rpython.rlib.rdynload import dlopen, dlclose, dlsym, dlsym_byordinal
from rpython.rlib.rdynload import DLOpenError, DLLHANDLE
from rpython.rlib import jit, rposix
from rpython.rlib.objectmodel import specialize
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.translator.platform import platform
from rpython.translator import cdir
from platform import machine
import py
import os
import sys
import ctypes.util

# maaaybe isinstance here would be better. Think
_MSVC = platform.name == "msvc"
_MINGW = platform.name == "mingw32"
_WIN32 = _MSVC or _MINGW
_WIN64 = _WIN32 and is_emulated_long
_MAC_OS = platform.name == "darwin"
_LITTLE_ENDIAN = sys.byteorder == 'little'
_BIG_ENDIAN = sys.byteorder == 'big'
_ARM = rffi_platform.getdefined('__arm__', '')

if _WIN32:
    from rpython.rlib import rwin32
    separate_module_sources = ['''
    #include <stdio.h>
    #include <windows.h>

    /* Get the module where the "fopen" function resides in */
    RPY_EXTERN
    HMODULE pypy_get_libc_handle(void) {
        MEMORY_BASIC_INFORMATION mi;
        char buf[1000];
        memset(&mi, 0, sizeof(mi));
        if( !VirtualQueryEx(GetCurrentProcess(), &fopen, &mi, sizeof(mi)) )
            return (HMODULE)0;
        GetModuleFileName((HMODULE)mi.AllocationBase, buf, 500);
        return (HMODULE)mi.AllocationBase;
    }
    ''']
    post_include_bits = ['RPY_EXTERN HMODULE pypy_get_libc_handle(void);\n',]
else:
    separate_module_sources = []
    post_include_bits = []

if not _WIN32:
    includes = ['ffi.h']

    if _MAC_OS:
        pre_include_bits = ['#define MACOSX']
    else:
        pre_include_bits = []

    libraries = ['ffi']
    link_files = []

    eci = ExternalCompilationInfo(
        pre_include_bits=pre_include_bits,
        includes=includes,
        libraries=libraries,
        separate_module_sources=separate_module_sources,
        post_include_bits=post_include_bits,
        include_dirs=platform.include_dirs_for_libffi(),
        library_dirs=platform.library_dirs_for_libffi(),
        link_files=link_files,
        testonly_libraries=['ffi'],
    )
elif _MINGW:
    includes = ['ffi.h']
    libraries = ['libffi-5']

    eci = ExternalCompilationInfo(
        libraries=libraries,
        includes=includes,
        separate_module_sources=separate_module_sources,
        post_include_bits=post_include_bits,
    )

    eci = rffi_platform.configure_external_library(
        'ffi-5', eci,
        [dict(prefix='libffi-', include_dir='include', library_dir='.libs'),
         dict(prefix=r'c:\\mingw64', include_dir='include', library_dir='lib'),
         ])
else:
    eci = ExternalCompilationInfo(
        includes=['ffi.h', 'windows.h'],
        libraries=['kernel32', 'libffi-8'],
        separate_module_sources=separate_module_sources,
        post_include_bits=post_include_bits,
    )

FFI_TYPE_P = lltype.Ptr(lltype.ForwardReference())
FFI_TYPE_PP = rffi.CArrayPtr(FFI_TYPE_P)
FFI_TYPE_NULL = lltype.nullptr(FFI_TYPE_P.TO)


class CConfig:
    _compilation_info_ = eci

    FFI_OK = rffi_platform.ConstantInteger('FFI_OK')
    FFI_BAD_TYPEDEF = rffi_platform.ConstantInteger('FFI_BAD_TYPEDEF')
    FFI_DEFAULT_ABI = rffi_platform.ConstantInteger('FFI_DEFAULT_ABI')
    if _WIN32 and not _WIN64:
        FFI_STDCALL = rffi_platform.ConstantInteger('FFI_STDCALL')
    if _ARM:
        FFI_SYSV = rffi_platform.ConstantInteger('FFI_SYSV')
        FFI_VFP = rffi_platform.ConstantInteger('FFI_VFP')
    FFI_TYPE_STRUCT = rffi_platform.ConstantInteger('FFI_TYPE_STRUCT')
    size_t = rffi_platform.SimpleType("size_t", rffi.ULONG)
    ffi_abi = rffi_platform.SimpleType("ffi_abi", rffi.USHORT)
    ffi_arg = rffi_platform.SimpleType("ffi_arg", lltype.Signed)

    ffi_type = rffi_platform.Struct('ffi_type', [('size', rffi.ULONG),
                                                 ('alignment', rffi.USHORT),
                                                 ('type', rffi.USHORT),
                                                 ('elements', FFI_TYPE_PP)])
    ffi_cif = rffi_platform.Struct('ffi_cif', [])
    ffi_closure = rffi_platform.Struct('ffi_closure',
                                       [('user_data', rffi.VOIDP)])


def add_simple_type(type_name):
    # register size/alignment/type constants for one ffi base type
    for name in ['size', 'alignment', 'type']:
        setattr(CConfig, type_name + '_' + name,
                rffi_platform.ConstantInteger(type_name + '.' + name))


def configure_simple_type(type_name):
    # build an immortal ffi_type struct from the configured constants
    l = lltype.malloc(FFI_TYPE_P.TO, flavor='raw', immortal=True)
    for tp, name in [(size_t, 'size'),
                     (rffi.USHORT, 'alignment'),
                     (rffi.USHORT, 'type')]:
        value = getattr(cConfig, '%s_%s' % (type_name, name))
        setattr(l, 'c_' + name, rffi.cast(tp, value))
    l.c_elements = lltype.nullptr(FFI_TYPE_PP.TO)
    return l


base_names = ['double', 'uchar', 'schar', 'sshort', 'ushort', 'uint', 'sint',
              # ffi_type_slong and ffi_type_ulong are omitted because
              # their meaning changes too much from one libffi version to
              # another. DON'T USE THEM! use cast_type_to_ffitype().
              'float', 'longdouble', 'pointer', 'void',
              # by size
              'sint8', 'uint8', 'sint16', 'uint16', 'sint32', 'uint32',
              'sint64', 'uint64']
type_names = ['ffi_type_%s' % name for name in base_names]
for i in type_names:
    add_simple_type(i)


class cConfig:
    pass


for k, v in rffi_platform.configure(CConfig).items():
    setattr(cConfig, k, v)

FFI_TYPE_P.TO.become(cConfig.ffi_type)
size_t = cConfig.size_t
FFI_ABI = cConfig.ffi_abi
ffi_arg = cConfig.ffi_arg

for name in type_names:
    locals()[name] = configure_simple_type(name)


def _signed_type_for(TYPE):
    sz = rffi.sizeof(TYPE)
    if sz == 1:
        return ffi_type_sint8
    elif sz == 2:
        return ffi_type_sint16
    elif sz == 4:
        return ffi_type_sint32
    elif sz == 8:
        return ffi_type_sint64
    else:
        raise ValueError("unsupported type size for %r" % (TYPE,))


def _unsigned_type_for(TYPE):
    sz = rffi.sizeof(TYPE)
    if sz == 1:
        return ffi_type_uint8
    elif sz == 2:
        return ffi_type_uint16
    elif sz == 4:
        return ffi_type_uint32
    elif sz == 8:
        return ffi_type_uint64
    else:
        raise ValueError("unsupported type size for %r" % (TYPE,))


__int_type_map = [
    (rffi.UCHAR, ffi_type_uchar),
    (rffi.SIGNEDCHAR, ffi_type_schar),
    (rffi.SHORT, ffi_type_sshort),
    (rffi.USHORT, ffi_type_ushort),
    (rffi.UINT, ffi_type_uint),
    (rffi.INT, ffi_type_sint),
    # xxx don't use ffi_type_slong and ffi_type_ulong - their meaning
    # changes from a libffi version to another :-((
    (rffi.ULONG, _unsigned_type_for(rffi.ULONG)),
    (rffi.LONG, _signed_type_for(rffi.LONG)),
    (rffi.ULONGLONG, _unsigned_type_for(rffi.ULONGLONG)),
    (rffi.LONGLONG, _signed_type_for(rffi.LONGLONG)),
    (lltype.UniChar, _unsigned_type_for(lltype.UniChar)),
    (lltype.Bool, _unsigned_type_for(lltype.Bool)),
    (lltype.Char, _signed_type_for(lltype.Char)),
    ]

__float_type_map = [
    (rffi.DOUBLE, ffi_type_double),
    (rffi.FLOAT, ffi_type_float),
    (rffi.LONGDOUBLE, ffi_type_longdouble),
    ]

__ptr_type_map = [
    (rffi.VOIDP, ffi_type_pointer),
    ]

__type_map = __int_type_map + __float_type_map + [
    (lltype.Void, ffi_type_void)
    ]

TYPE_MAP_INT = dict(__int_type_map)
TYPE_MAP_FLOAT = dict(__float_type_map)
TYPE_MAP = dict(__type_map)

ffitype_map_int = unrolling_iterable(__int_type_map)
ffitype_map_int_or_ptr = unrolling_iterable(__int_type_map + __ptr_type_map)
ffitype_map_float = unrolling_iterable(__float_type_map)
ffitype_map = unrolling_iterable(__type_map)

del __int_type_map, __float_type_map, __ptr_type_map, __type_map


def external(name, args, result, **kwds):
    return rffi.llexternal(name, args, result, compilation_info=eci, **kwds)


def winexternal(name, args, result):
    return rffi.llexternal(name, args, result, compilation_info=eci,
                           calling_conv='win')


if 1 or not _MSVC:  # deliberate dead-code toggle kept from the original
    def check_fficall_result(result, flags):
        pass  # No check
else:
    def check_fficall_result(result, flags):
        if result == 0:
            return
        # if win64:
        #     raises ValueError("ffi_call failed with code %d" % (result,))
        if result < 0:
            if flags & FUNCFLAG_CDECL:
                raise StackCheckError(
                    "Procedure called with not enough arguments"
                    " (%d bytes missing)"
                    " or wrong calling convention" % (-result,))
            else:
                raise StackCheckError(
                    "Procedure called with not enough arguments "
                    " (%d bytes missing) " % (-result,))
        else:
            raise StackCheckError(
                "Procedure called with too many "
                "arguments (%d bytes in excess) " % (result,))

if not _WIN32:
    libc_name = ctypes.util.find_library('c')
    assert libc_name is not None, \
        "Cannot find C library, ctypes.util.find_library('c') returned None"

    def get_libc_name():
        return libc_name
elif _MSVC:
    get_libc_handle = external('pypy_get_libc_handle', [], DLLHANDLE)

    @jit.dont_look_inside
    def get_libc_name():
        return rwin32.GetModuleFileName(get_libc_handle())

    libc_name = get_libc_name().lower()
    assert "msvcr" in libc_name or 'ucrtbase' in libc_name, \
        "Suspect msvcrt library: %s" % (get_libc_name(),)
elif _MINGW:
    def get_libc_name():
        return 'msvcrt.dll'

if _WIN32:
    LoadLibrary = rwin32.LoadLibrary

FFI_OK = cConfig.FFI_OK
FFI_BAD_TYPEDEF = cConfig.FFI_BAD_TYPEDEF
FFI_DEFAULT_ABI = cConfig.FFI_DEFAULT_ABI
if _WIN32 and not _WIN64:
    FFI_STDCALL = cConfig.FFI_STDCALL
if _ARM:
    FFI_SYSV = cConfig.FFI_SYSV
    FFI_VFP = cConfig.FFI_VFP
FFI_TYPE_STRUCT = cConfig.FFI_TYPE_STRUCT

FFI_CIFP = lltype.Ptr(cConfig.ffi_cif)
FFI_CLOSUREP = lltype.Ptr(cConfig.ffi_closure)

VOIDPP = rffi.CArrayPtr(rffi.VOIDP)

c_ffi_prep_cif = external('ffi_prep_cif',
                          [FFI_CIFP, FFI_ABI, rffi.UINT,
                           FFI_TYPE_P, FFI_TYPE_PP],
                          rffi.INT)
if 0 and _MSVC:  # deliberate dead-code toggle kept from the original
    c_ffi_call_return_type = rffi.INT
else:
    c_ffi_call_return_type = lltype.Void
c_ffi_call = external('ffi_call',
                      [FFI_CIFP, rffi.VOIDP, rffi.VOIDP, VOIDPP],
                      c_ffi_call_return_type,
                      save_err=rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
# Note: the RFFI_ALT_ERRNO flag matches the one in pyjitpl.direct_libffi_call
CALLBACK_TP = rffi.CCallback([FFI_CIFP, rffi.VOIDP, rffi.VOIDPP, rffi.VOIDP],
                             lltype.Void)
c_ffi_prep_closure = external('ffi_prep_closure',
                              [FFI_CLOSUREP, FFI_CIFP,
                               CALLBACK_TP, rffi.VOIDP],
                              rffi.INT)

FFI_STRUCT_P = lltype.Ptr(lltype.Struct(
    'FFI_STRUCT',
    ('ffistruct', FFI_TYPE_P.TO),
    ('members', lltype.Array(FFI_TYPE_P))))


@specialize.arg(3)
def make_struct_ffitype_e(size, aligment, field_types, track_allocation=True):
    """Compute the type of a structure.  Returns a FFI_STRUCT_P out of
    which the 'ffistruct' member is a regular FFI_TYPE.
    """
    tpe = lltype.malloc(FFI_STRUCT_P.TO, len(field_types) + 1, flavor='raw',
                        track_allocation=track_allocation)
    tpe.ffistruct.c_type = rffi.cast(rffi.USHORT, FFI_TYPE_STRUCT)
    tpe.ffistruct.c_size = rffi.cast(rffi.SIZE_T, size)
    tpe.ffistruct.c_alignment = rffi.cast(rffi.USHORT, aligment)
    tpe.ffistruct.c_elements = rffi.cast(
        FFI_TYPE_PP, lltype.direct_arrayitems(tpe.members))
    n = 0
    while n < len(field_types):
        tpe.members[n] = field_types[n]
        n += 1
    tpe.members[n] = lltype.nullptr(FFI_TYPE_P.TO)  # NULL-terminated
    return tpe


@specialize.memo()
def cast_type_to_ffitype(tp):
    """ This function returns ffi representation of rpython type tp
    """
    return TYPE_MAP[tp]


@specialize.argtype(1)
def push_arg_as_ffiptr(ffitp, arg, ll_buf):
    # This is for primitive types.  Note that the exact type of 'arg' may be
    # different from the expected 'c_size'.  To cope with that, we fall back
    # to a byte-by-byte copy.
    TP = lltype.typeOf(arg)
    TP_P = lltype.Ptr(rffi.CArray(TP))
    TP_size = rffi.sizeof(TP)
    c_size = intmask(ffitp.c_size)
    # if both types have the same size, we can directly write the
    # value to the buffer
    if c_size == TP_size:
        buf = rffi.cast(TP_P, ll_buf)
        buf[0] = arg
    else:
        # needs byte-by-byte copying.  Make sure 'arg' is an integer type.
        # Note that this won't work for rffi.FLOAT/rffi.DOUBLE.
        assert TP is not rffi.FLOAT and TP is not rffi.DOUBLE
        if TP_size <= rffi.sizeof(lltype.Signed):
            arg = rffi.cast(lltype.Unsigned, arg)
        else:
            arg = rffi.cast(lltype.UnsignedLongLong, arg)
        if _LITTLE_ENDIAN:
            for i in range(c_size):
                ll_buf[i] = chr(arg & 0xFF)
                arg >>= 8
        elif _BIG_ENDIAN:
            for i in range(c_size - 1, -1, -1):
                ll_buf[i] = chr(arg & 0xFF)
                arg >>= 8
        else:
            raise AssertionError


# type defs for callback and closure userdata
USERDATA_P = lltype.Ptr(lltype.ForwardReference())
CALLBACK_TP = lltype.Ptr(lltype.FuncType(
    [rffi.VOIDPP, rffi.VOIDP, USERDATA_P], lltype.Void))
USERDATA_P.TO.become(lltype.Struct(
    'userdata',
    ('callback', CALLBACK_TP),
    ('addarg', lltype.Signed),
    hints={'callback': True}))


@jit.jit_callback("CLIBFFI")
def _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata):
    """ Callback specification.
    ffi_cif - something ffi specific, don't care
    ll_args - rffi.VOIDPP - pointer to array of pointers to args
    ll_restype - rffi.VOIDP - pointer to result
    ll_userdata - a special structure which holds necessary information
                  (what the real callback is for example), casted to VOIDP
    """
    userdata = rffi.cast(USERDATA_P, ll_userdata)
    llop.revdb_do_next_call(lltype.Void)
    userdata.callback(ll_args, ll_res, userdata)


def ll_callback(ffi_cif, ll_res, ll_args, ll_userdata):
    # save/restore errno around the user callback
    rposix._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
    _ll_callback(ffi_cif, ll_res, ll_args, ll_userdata)
    rposix._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)


class StackCheckError(ValueError):
    message = None

    def __init__(self, message):
        self.message = message


class LibFFIError(Exception):
    pass


CHUNK = 4096
CLOSURES = rffi.CArrayPtr(FFI_CLOSUREP.TO)

# NOTE(review): class ClosureHeap is truncated in the available source view
# (cut off after its _more() method) and is not reproduced here.
alloc(self): if not self.free_list: self._more() p = self.free_list self.free_list = rffi.cast(rffi.VOIDPP, p)[0] return rffi.cast(FFI_CLOSUREP, p) def free(self, p): rffi.cast(rffi.VOIDPP,
# # For licensing see accompanying LICENSE.txt file. # Copyright (C) 2020 Apple Inc. All Rights Reserved. # from pylab import * import argparse import h5py import glob import os import PIL.ImageDraw parser = argparse.ArgumentParser() parser.add_argument("--scene_dir", required=True) parser.add_argument("--camera_name", required=True) parser.add_argument("--bounding_box_type", required=True) parser.add_argument("--frame_id", type=int) parser.add_argument("--num_pixels_per_fragment", type=int) args = parser.parse_args() assert args.bounding_box_type == "axis_aligned" or args.bounding_box_type == "object_aligned_2d" or args.bounding_box_type == "object_aligned_3d" print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX] Begin...") eps = 5.0 # slack value for depth test; a fragment must be closer to the camera by a margin of eps to be rendered lw = 4 # line width back_face_cull = False # cull fragments based on whether they come from a geometric primitive that is facing away from the camera if args.num_pixels_per_fragment is not None: num_pixels_per_fragment = args.num_pixels_per_fragment else: num_pixels_per_fragment = 10 # generate relatively coarse fragments by default images_dir = os.path.join(args.scene_dir, "images") camera_keyframe_frame_indices_hdf5_file = os.path.join(args.scene_dir, "_detail", args.camera_name, "camera_keyframe_frame_indices.hdf5") camera_keyframe_positions_hdf5_file = os.path.join(args.scene_dir, "_detail", args.camera_name, "camera_keyframe_positions.hdf5") camera_keyframe_orientations_hdf5_file = os.path.join(args.scene_dir, "_detail", args.camera_name, "camera_keyframe_orientations.hdf5") in_scene_fileroot = "scene" in_rgb_jpg_dir = os.path.join(images_dir, in_scene_fileroot + "_" + args.camera_name + "_final_preview") in_rgb_jpg_files = os.path.join(images_dir, in_scene_fileroot + "_" + args.camera_name + "_final_preview", "frame.*.tonemap.jpg") in_position_hdf5_dir = os.path.join(images_dir, in_scene_fileroot + "_" + args.camera_name + 
"_geometry_hdf5") out_preview_dir = os.path.join(images_dir, in_scene_fileroot + "_" + args.camera_name + "_final_preview") mesh_objects_sii_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "mesh_objects_sii.hdf5") metadata_semantic_instance_colors_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_colors.hdf5") metadata_semantic_colors_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_colors.hdf5") if args.bounding_box_type == "axis_aligned": metadata_semantic_instance_bounding_box_positions_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_axis_aligned_positions.hdf5") metadata_semantic_instance_bounding_box_orientations_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_axis_aligned_orientations.hdf5") metadata_semantic_instance_bounding_box_extents_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_axis_aligned_extents.hdf5") if args.bounding_box_type == "object_aligned_2d": metadata_semantic_instance_bounding_box_positions_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_2d_positions.hdf5") metadata_semantic_instance_bounding_box_orientations_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_2d_orientations.hdf5") metadata_semantic_instance_bounding_box_extents_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_2d_extents.hdf5") if args.bounding_box_type == "object_aligned_3d": metadata_semantic_instance_bounding_box_positions_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_3d_positions.hdf5") metadata_semantic_instance_bounding_box_orientations_hdf5_file = 
os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_3d_orientations.hdf5") metadata_semantic_instance_bounding_box_extents_hdf5_file = os.path.join(args.scene_dir, "_detail", "mesh", "metadata_semantic_instance_bounding_box_object_aligned_3d_extents.hdf5") with h5py.File(camera_keyframe_frame_indices_hdf5_file, "r") as f: camera_keyframe_frame_indices = f["dataset"][:] with h5py.File(camera_keyframe_positions_hdf5_file, "r") as f: camera_keyframe_positions = f["dataset"][:] with h5py.File(camera_keyframe_orientations_hdf5_file, "r") as f: camera_keyframe_orientations = f["dataset"][:] with h5py.File(mesh_objects_sii_hdf5_file, "r") as f: mesh_objects_sii = f["dataset"][:] with h5py.File(metadata_semantic_instance_colors_hdf5_file, "r") as f: semantic_instance_colors = f["dataset"][:] with h5py.File(metadata_semantic_instance_bounding_box_positions_hdf5_file, "r") as f: bounding_box_positions = f["dataset"][:] with h5py.File(metadata_semantic_instance_bounding_box_orientations_hdf5_file, "r") as f: bounding_box_orientations = f["dataset"][:] with h5py.File(metadata_semantic_instance_bounding_box_extents_hdf5_file, "r") as f: bounding_box_extents = f["dataset"][:] assert all(camera_keyframe_frame_indices == arange(camera_keyframe_frame_indices.shape[0])) if not os.path.exists(out_preview_dir): os.makedirs(out_preview_dir) in_filenames = [ os.path.basename(f) for f in sort(glob.glob(in_rgb_jpg_files)) ] for in_filename in in_filenames: in_filename_ids = [int(t) for t in in_filename.split(".") if t.isdigit()] assert len(in_filename_ids) == 1 frame_id = in_filename_ids[0] if args.frame_id is not None and frame_id != args.frame_id: continue in_file_root = in_filename.replace(".tonemap.jpg", "") in_rgb_jpg_file = os.path.join(in_rgb_jpg_dir, in_filename) in_position_hdf5_file = os.path.join(in_position_hdf5_dir, in_file_root + ".position.hdf5") out_rgb_bb_jpg_file = os.path.join(out_preview_dir, in_file_root + ".bb_" + 
args.bounding_box_type + ".jpg") try: rgb_color = imread(in_rgb_jpg_file) except: print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX] WARNING: COULD NOT LOAD COLOR IMAGE: " + in_rgb_hdf5_file + "...") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") continue try: with h5py.File(in_position_hdf5_file, "r") as f: position = f["dataset"][:].astype(float32) except: print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX] WARNING: COULD NOT LOAD POSITION IMAGE: " + in_position_hdf5_file + "...") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX]") continue # get image parameters height_pixels = rgb_color.shape[0] width_pixels = rgb_color.shape[1] # fov_x and fov_y need to match the _vray_user_params.py that was used to generate the images fov_x = pi/3.0 fov_y = 2.0 * arctan(height_pixels * tan(fov_x/2.0) / width_pixels) near = 1.0 far = 1000.0 # # construct projection matrix # # HACK: we should use the per-scene projection matrix defined in contrib/mikeroberts3000 # because this matrix will be incorrect for some scenes # f_h = tan(fov_y/2.0)*near f_w = f_h*width_pixels/height_pixels left = -f_w right = f_w bottom = -f_h top = f_h M_proj = matrix(zeros((4,4))) M_proj[0,0] = (2.0*near)/(right - left) M_proj[1,1] = (2.0*near)/(top - bottom) M_proj[0,2] = (right + left)/(right - left) M_proj[1,2] = (top + bottom)/(top - bottom) M_proj[2,2] = -(far + near)/(far - near) M_proj[3,2] = -1.0 M_proj[2,3] = 
-(2.0*far*near)/(far - near) # get camera parameters keyframe_ids = where(camera_keyframe_frame_indices == frame_id)[0] assert len(keyframe_ids) == 1 keyframe_id = keyframe_ids[0] camera_position = camera_keyframe_positions[keyframe_id] camera_orientation = camera_keyframe_orientations[keyframe_id] R_world_from_cam = matrix(camera_orientation) t_world_from_cam = matrix(camera_position).T R_cam_from_world = R_world_from_cam.T t_cam_from_world = -R_cam_from_world*t_world_from_cam print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX] Generating fragments...") num_fragments_per_pixel = 1.0/num_pixels_per_fragment fragments_p1_world = [] fragments_p2_world = [] fragments_p1_cam = [] fragments_p2_cam = [] fragments_p1_ndc = [] fragments_p2_ndc = [] fragments_p1_screen = [] fragments_p2_screen = [] fragments_color = [] for sii in unique(mesh_objects_sii): if sii == -1: continue color_sii = semantic_instance_colors[sii] bounding_box_center_world = matrix(bounding_box_positions[sii]).A bounding_box_extent_world = matrix(bounding_box_extents[sii]).A R_world_from_obj = matrix(bounding_box_orientations[sii]) t_world_from_obj = matrix(bounding_box_positions[sii]).T def transform_point_screen_from_world(p_world): p_cam = t_cam_from_world + R_cam_from_world*p_world p_cam_ = matrix(r_[ p_cam.A1, 1 ]).T p_clip = M_proj*p_cam_ p_ndc = p_clip/p_clip[3] p_ndc_ = p_ndc.A1 p_screen_x = 0.5*(p_ndc_[0]+1)*(width_pixels-1) p_screen_y = (1 - 0.5*(p_ndc_[1]+1))*(height_pixels-1) p_screen_z = (p_ndc_[2]+1)/2.0 p_screen = matrix([p_screen_x, p_screen_y, p_screen_z]).T return p_screen, p_ndc, p_clip, p_cam def transform_point_world_from_obj(p_obj): p_world = t_world_from_obj + R_world_from_obj*p_obj return p_world def transform_point_screen_from_obj(p_obj): p_world = transform_point_world_from_obj(p_obj) p_screen, p_ndc, p_clip, p_cam = transform_point_screen_from_world(p_world) return p_screen, p_ndc, p_clip, p_cam, p_world def generate_fragment(p1_obj, p2_obj, n_obj, color): p1_screen, 
p1_ndc, p1_clip, p1_cam, p1_world = transform_point_screen_from_obj(p1_obj) p2_screen, p2_ndc, p2_clip, p2_cam, p2_world = transform_point_screen_from_obj(p2_obj) p1_inside_frustum = all(p1_ndc == clip(p1_ndc,-1,1)) p2_inside_frustum = all(p2_ndc == clip(p2_ndc,-1,1)) p_center_world = (p1_world+p2_world)/2.0 p_camera_world = matrix(camera_position).T v_world = p_camera_world - p_center_world n_world = R_world_from_obj*n_obj front_facing = dot(v_world.A1, n_world.A1) > 0 if back_face_cull and not front_facing: return if not (p1_inside_frustum or p2_inside_frustum): return fragments_p1_world.append(p1_world.A1) fragments_p2_world.append(p2_world.A1) fragments_p1_cam.append(p1_cam.A1) fragments_p2_cam.append(p1_cam.A1) fragments_p1_ndc.append(p1_ndc.A1) fragments_p2_ndc.append(p2_ndc.A1) fragments_p1_screen.append(p1_screen.A1) fragments_p2_screen.append(p2_screen.A1) fragments_color.append(color) def generate_fragments_for_line(p1_obj, p2_obj, n_obj, color): p1_screen, p1_ndc, p1_clip, p1_cam, p1_world = transform_point_screen_from_obj(p1_obj) p2_screen, p2_ndc, p2_clip, p2_cam, p2_world = transform_point_screen_from_obj(p2_obj) p1_inside_frustum = all(p1_ndc == clip(p1_ndc,-1,1)) p2_inside_frustum = all(p2_ndc == clip(p2_ndc,-1,1)) # HACK: strictly speaking this frustum culling test is incorrect, because it will discard lines # that pass through the frustum but whose endpoints are both outside the frustum; but this is a # rare case, and frustum culling in this way is a lot faster, so we do it anyway if p1_inside_frustum or p2_inside_frustum: num_pixels_per_line = linalg.norm(p2_screen - p1_screen) num_fragments_per_line = int(ceil(num_pixels_per_line*num_fragments_per_pixel)) t = linspace(0,1,num_fragments_per_line+1) for ti in range(num_fragments_per_line): t_curr = t[ti] t_next = t[ti+1] p_curr_obj = t_curr*p1_obj + (1-t_curr)*p2_obj p_next_obj = t_next*p1_obj + (1-t_next)*p2_obj generate_fragment(p_curr_obj, p_next_obj, n_obj, color) bounding_box_corner_000_obj = 
diag(matrix(bounding_box_extent_world).A1)*(matrix([0.0,0.0,0.0]).T - 0.5) bounding_box_corner_100_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([1.0,0.0,0.0]).T - 0.5) bounding_box_corner_010_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([0.0,1.0,0.0]).T - 0.5) bounding_box_corner_110_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([1.0,1.0,0.0]).T - 0.5) bounding_box_corner_001_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([0.0,0.0,1.0]).T - 0.5) bounding_box_corner_101_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([1.0,0.0,1.0]).T - 0.5) bounding_box_corner_011_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([0.0,1.0,1.0]).T - 0.5) bounding_box_corner_111_obj = diag(matrix(bounding_box_extent_world).A1)*(matrix([1.0,1.0,1.0]).T - 0.5) # x=0 v_plane_normal_obj = matrix([-1,0,0]).T generate_fragments_for_line(bounding_box_corner_000_obj, bounding_box_corner_010_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_010_obj, bounding_box_corner_011_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_011_obj, bounding_box_corner_001_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_001_obj, bounding_box_corner_000_obj, v_plane_normal_obj, color_sii) # x=1 v_plane_normal_obj = matrix([1,0,0]).T generate_fragments_for_line(bounding_box_corner_100_obj, bounding_box_corner_110_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_110_obj, bounding_box_corner_111_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_111_obj, bounding_box_corner_101_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_101_obj, bounding_box_corner_100_obj, v_plane_normal_obj, color_sii) # y=0 v_plane_normal_obj = matrix([0,-1,0]).T generate_fragments_for_line(bounding_box_corner_000_obj, bounding_box_corner_100_obj, v_plane_normal_obj, color_sii) 
generate_fragments_for_line(bounding_box_corner_100_obj, bounding_box_corner_101_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_101_obj, bounding_box_corner_001_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_001_obj, bounding_box_corner_000_obj, v_plane_normal_obj, color_sii) # y=1 v_plane_normal_obj = matrix([0,1,0]).T generate_fragments_for_line(bounding_box_corner_010_obj, bounding_box_corner_110_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_110_obj, bounding_box_corner_111_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_111_obj, bounding_box_corner_011_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_011_obj, bounding_box_corner_010_obj, v_plane_normal_obj, color_sii) # z=0 v_plane_normal_obj = matrix([0,0,-1]).T generate_fragments_for_line(bounding_box_corner_000_obj, bounding_box_corner_100_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_100_obj, bounding_box_corner_110_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_110_obj, bounding_box_corner_010_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_010_obj, bounding_box_corner_000_obj, v_plane_normal_obj, color_sii) # z=1 v_plane_normal_obj = matrix([0,0,1]).T generate_fragments_for_line(bounding_box_corner_001_obj, bounding_box_corner_101_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_101_obj, bounding_box_corner_111_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_111_obj, bounding_box_corner_011_obj, v_plane_normal_obj, color_sii) generate_fragments_for_line(bounding_box_corner_011_obj, bounding_box_corner_001_obj, v_plane_normal_obj, color_sii) fragments_p1_world = array(fragments_p1_world) fragments_p2_world = array(fragments_p2_world) 
fragments_p1_cam = array(fragments_p1_cam) fragments_p2_cam = array(fragments_p2_cam) fragments_p1_ndc = array(fragments_p1_ndc) fragments_p2_ndc = array(fragments_p2_ndc) fragments_p1_screen = array(fragments_p1_screen) fragments_p2_screen = array(fragments_p2_screen) fragments_color = array(fragments_color) num_fragments = fragments_p1_world.shape[0] print("[HYPERSIM: SCENE_GENERATE_IMAGES_BOUNDING_BOX] Generated " + str(num_fragments) + " fragments...") fragments_p_center_world = (fragments_p1_world+fragments_p2_world)/2.0 fragments_p_center_cam = (fragments_p1_cam+fragments_p2_cam)/2.0 # sort fragments in back-to-front order, i.e., by z-axis coordinate in camera space fragment_inds_sorted = argsort(fragments_p_center_cam[:,2]) # discard fragments based on a depth test fragment_inds_sorted_depth_test_pass = [] for fi in fragment_inds_sorted: p1_world = matrix(fragments_p1_world[fi]).T p2_world = matrix(fragments_p2_world[fi]).T p_center_world = matrix(fragments_p_center_world[fi]).T p_center_screen, p_center_ndc, p_center_clip, p_center_cam = transform_point_screen_from_world(p_center_world) p_center_inside_frustum = all(p_center_ndc == clip(p_center_ndc,-1,1)) if p_center_inside_frustum: p_test_world = p_center_world p_test_screen = p_center_screen else: p1_ndc = matrix(fragments_p1_ndc[fi]).T p2_ndc = matrix(fragments_p2_ndc[fi]).T p1_screen = matrix(fragments_p1_screen[fi]).T p2_screen = matrix(fragments_p2_screen[fi]).T p1_inside_frustum = all(p1_ndc == clip(p1_ndc,-1,1)) p2_inside_frustum = all(p2_ndc == clip(p2_ndc,-1,1)) assert p1_inside_frustum + p2_inside_frustum == 1 if p1_inside_frustum: p_test_world = p1_world p_test_screen = p1_screen if p2_inside_frustum: p_test_world = p2_world p_test_screen = p2_screen p_test_screen_int = p_test_screen.astype(int32) p_img_world = position[p_test_screen_int[1], p_test_screen_int[0]] if linalg.norm(camera_position - p_test_world.A1) - eps < linalg.norm(camera_position - p_img_world): 
fragment_inds_sorted_depth_test_pass.append(fi) fragment_inds_sorted_depth_test_pass = array(fragment_inds_sorted_depth_test_pass) fragments_p1_world = fragments_p1_world[fragment_inds_sorted_depth_test_pass] fragments_p2_world = fragments_p2_world[fragment_inds_sorted_depth_test_pass] fragments_p1_cam = fragments_p1_cam[fragment_inds_sorted_depth_test_pass] fragments_p2_cam = fragments_p2_cam[fragment_inds_sorted_depth_test_pass] fragments_p1_ndc = fragments_p1_ndc[fragment_inds_sorted_depth_test_pass] fragments_p2_ndc = fragments_p2_ndc[fragment_inds_sorted_depth_test_pass] fragments_p1_screen
and type P")] for atom in mda_universe.atoms: if atom.resname == group1 or atom.resname == group2: if atom.resid in lower_membrane: if atom.type == "P" or atom.name == c_atom_name: if atom.resname not in membrane["Lower"]: membrane["Lower"][atom.resname] = [atom.id] else: membrane["Lower"][atom.resname].append(atom.id) elif atom.resid in upper_membrane: if atom.type == "P" or atom.name == c_atom_name: if atom.resname not in membrane["Upper"]: membrane["Upper"][atom.resname] = [atom.id] else: membrane["Upper"][atom.resname].append(atom.id) ndx_filename = f'custom_membrane_{random_string}.ndx' with open(ndx_filename, 'w') as f: for i in membrane: for resgroup in membrane[i]: f.write(f"[ {i}_{resgroup} ]\n") group_str = " ".join([str(i) for i in membrane[i][resgroup]]) f.write("\n".join(textwrap.wrap(group_str, 15))) f.write("\n") for group in [group1, group2]: filename_lower = f"angle_lower_{group}_{random_string}.xvg" temp_group = f"Lower_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", "whole_res_com", "-oav", filename_lower], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) for group in [group1, group2]: filename_upper = f"angle_upper_{group}_{random_string}.xvg" temp_group = f"Upper_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", "whole_res_com", "-oav", filename_upper], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) angle = {'Upper':{}, 'Lower':{}} for group in [group1, group2]: if mode=="average": for n, i in enumerate([f"angle_upper_{group}_{random_string}.xvg", f"angle_lower_{group}_{random_string}.xvg"]): angle[list(angle.keys())[n]][group] = [] with open(i, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): angle[list(angle.keys())[n]][group].append(float(line.split()[1])) angle[list(angle.keys())[n]][group]=mean(angle[list(angle.keys())[n]][group]) 
elif mode=="framewise" or mode=="window": for n, i in enumerate([f"angle_upper_{group}_{random_string}.xvg", f"angle_lower_{group}_{random_string}.xvg"]): angle[list(angle.keys())[n]][group] = [] with open(i, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): angle[list(angle.keys())[n]][group].append(float(line.split()[1])) subprocess.run(["rm", i]) subprocess.run(["rm", ndx_filename]) return angle else: if split == False: avg_angles = {} membrane = {} for i in mda_universe.atoms: if i.resname in selection: if i.type == "P" or i.name == c_atom_name: if i.resname not in membrane: membrane[i.resname] = [i.id] else: membrane[i.resname].append(i.id) ndx_filename = f'custom_membrane_{random_string}.ndx' with open(ndx_filename, 'w') as f: if grouping == "combine": f.write(f"[ Combined ]\n") for i in membrane: if grouping != "combine": f.write(f"[ {i} ]\n") group_str = " ".join([str(i) for i in membrane[i]]) f.write("\n".join(textwrap.wrap(group_str, 15))) f.write("\n") if grouping != "combine": for group in selection: filename=f"angle_{group}_{random_string}.xvg" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-g1", g1, "-g2", g2, "-group1", group, "-seltype", seltype, "-selrpos", selrpos, "-oav", filename], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) angle = [] with open(filename, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): angle.append(float(line.split()[1])) subprocess.run(["rm", filename]) avg_angles[group] = angle else: filename=f"angle_combined_{random_string}.xvg" group = "Combined" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-g1", g1, "-g2", g2, "-group1", group, "-seltype", seltype, "-selrpos", selrpos, "-oav", filename], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) angle = [] with open(filename, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): 
angle.append(float(line.split()[1])) subprocess.run(["rm", filename]) avg_angles[group] = angle subprocess.run(["rm", ndx_filename]) return avg_angles else: membrane = {'Lower':{}, 'Upper':{}} lower_membrane = [atom.resid for atom in mda_universe.select_atoms(f"prop z < {mda_universe.select_atoms('type P').center_of_mass()[2]} and type P")] upper_membrane = [atom.resid for atom in mda_universe.select_atoms(f"prop z > {mda_universe.select_atoms('type P').center_of_mass()[2]} and type P")] for atom in mda_universe.atoms: if atom.resname in selection: if atom.resid in lower_membrane: if atom.type == "P" or atom.name == c_atom_name: if atom.resname not in membrane["Lower"]: membrane["Lower"][atom.resname] = [atom.id] else: membrane["Lower"][atom.resname].append(atom.id) elif atom.resid in upper_membrane: if atom.type == "P" or atom.name == c_atom_name: if atom.resname not in membrane["Upper"]: membrane["Upper"][atom.resname] = [atom.id] else: membrane["Upper"][atom.resname].append(atom.id) ndx_filename = f'custom_membrane_{random_string}.ndx' with open(ndx_filename, 'w') as f: for i in membrane: if grouping == "combine": f.write(f"[ {i}_Combined ]\n") for resgroup in membrane[i]: if grouping != "combine": f.write(f"[ {i}_{resgroup} ]\n") group_str = " ".join([str(i) for i in membrane[i][resgroup]]) f.write("\n".join(textwrap.wrap(group_str, 15))) f.write("\n") if grouping != "combine": for group in selection: filename_lower = f"angle_lower_{group}_{random_string}.xvg" temp_group = f"Lower_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", "whole_res_com", "-oav", filename_lower], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) for group in selection: filename_upper = f"angle_upper_{group}_{random_string}.xvg" temp_group = f"Upper_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", 
"whole_res_com", "-oav", filename_upper], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) else: group = "Combined" filename_lower = f"angle_lower_{group}_{random_string}.xvg" temp_group = f"Lower_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", "whole_res_com", "-oav", filename_lower], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) filename_upper = f"angle_upper_{group}_{random_string}.xvg" temp_group = f"Upper_{group}" subprocess.run(["gmx", "gangle", "-f", trr_filename, "-s", tpr_filename, "-n", ndx_filename, "-group1", temp_group, "-seltype", "whole_res_com", "-oav", filename_upper], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT) angle = {'Upper':{}, 'Lower':{}} if grouping != "combine": for group in selection: for n, i in enumerate([f"angle_upper_{group}_{random_string}.xvg", f"angle_lower_{group}_{random_string}.xvg"]): angle[list(angle.keys())[n]][group] = [] with open(i, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): angle[list(angle.keys())[n]][group].append(float(line.split()[1])) subprocess.run(["rm", i]) else: group = "Combined" for n, i in enumerate([f"angle_upper_{group}_{random_string}.xvg", f"angle_lower_{group}_{random_string}.xvg"]): angle[list(angle.keys())[n]][group] = [] with open(i, "r") as f: for line in f: if (not line[0]=="#") and (not line[0]=="@"): angle[list(angle.keys())[n]][group].append(float(line.split()[1])) subprocess.run(["rm", i]) subprocess.run(["rm", ndx_filename]) return angle def clustering_plots(pdb_file, top_bottom, protein_residue_names, lipids_to_cluster, attached_ligands, mode="pair", plot_name="Cluster", box_side_length = 6): class PDB_Atom(): def __init__(self, pdbline): self.Atom_serial_number = str(pdbline[7:12]) self.Res_name = pdbline[18:22].strip() self.chain_identifies = pdbline[22] self.Res_number = int(pdbline[23:27]) self.xcoord = float(pdbline[31:39]) self.ycoord = 
float(pdbline[39:47]) self.zcoord = float(pdbline[47:55]) self.Temp_factor = pdbline[61:67] self.PDBLINE = pdbline self.Inner_surface = False self.Residue_ID = str(self.Res_number)+"."+str(self.chain_identifies) self.Selected_this_run = False self.atom_name = pdbline[13:17].strip() class Coordinate: def __init__(self,x,y,z): self.x = x self.y = y self.z = z try: # open the PDB_file and read all the data with open(pdb_file, 'r') as f: all_atoms_list = [] while True: line = f.readline() if line[:6] == "ENDMDL": break # Parse the PDB data to get all the atomic information in the file line = " "+line if(line[1:5]=="ATOM" or line[1:5]=="HETA"): a = PDB_Atom(line) all_atoms_list.append(a) f.close() # Now divide the atoms into lipid and protein atoms protein_atoms_list = [] lipid_atoms_list = [] ALL_LIPIDS = [] for atom in all_atoms_list: ALL_LIPIDS.append(atom) if(atom.Res_name in protein_residue_names): protein_atoms_list.append(atom) elif(atom.Res_name not in attached_ligands): lipid_atoms_list.append(atom) min_z = 10000 max_z = -10000 mean_z = 0 for atom in lipid_atoms_list: mean_z += atom.zcoord if(atom.zcoord > max_z): max_z = atom.zcoord if(atom.zcoord < min_z): min_z = atom.zcoord protein_coordinates_list = [] lipid_coordinates_list = [] prev_prot_resnum = protein_atoms_list[0].Res_number x, y, z = 0, 0, 0 count = 0 for atom in protein_atoms_list: if(atom.Res_number == prev_prot_resnum): x += atom.xcoord y += atom.ycoord z += atom.zcoord count += 1 else: protein_coordinates_list.append( Coordinate(x/count, y/count, z/count) ) x, y, z = atom.xcoord, atom.ycoord, atom.zcoord prev_prot_resnum = atom.Res_number count = 1 protein_coordinates_list.append(Coordinate(x/count, y/count, z/count)) x, y, z = 0, 0, 0 count = 0 temp_list = [] for atom in lipid_atoms_list: if(atom.Res_name in lipids_to_cluster): temp_list.append(atom) lipid_atoms_list = temp_list prev_lipid_resnum = lipid_atoms_list[0].Res_number for atom in lipid_atoms_list: if (atom.Res_number == 
prev_lipid_resnum): x += atom.xcoord y += atom.ycoord z += atom.zcoord count += 1 else: lipid_coordinates_list.append( Coordinate(x/count, y/count, z/count) ) x, y, z = atom.xcoord, atom.ycoord, atom.zcoord prev_lipid_resnum = atom.Res_number count = 1 lipid_coordinates_list.append(Coordinate(x/count, y/count, z/count)) min_memb_x = min(lipid_atoms_list, key=lambda x: x.xcoord).xcoord min_memb_y = min(lipid_atoms_list, key= lambda x: x.ycoord).ycoord mean_z = 0 for coord in lipid_coordinates_list: mean_z += coord.z for coord in protein_coordinates_list: mean_z += coord.z mean_z /= (len(protein_coordinates_list)+ len(lipid_coordinates_list)) z_min_lip = min(lipid_coordinates_list, key=lambda x: x.z).z z_max_lip = max(lipid_coordinates_list, key=lambda x: x.z).z new_lip_coords = [] new_prot_coords = [] for coord in lipid_coordinates_list: if(top_bottom == "top"): if(coord.z >= mean_z): new_lip_coords.append(coord) elif(top_bottom == "bottom"): if(coord.z <= mean_z): new_lip_coords.append(coord) else: new_lip_coords.append(coord) lipid_coordinates_list = new_lip_coords for coord in protein_coordinates_list: if(top_bottom == "top"): if(coord.z >= mean_z and coord.z < z_max_lip): new_prot_coords.append(coord) elif(top_bottom == "bottom"): if(coord.z <= mean_z and coord.z >= z_min_lip): new_prot_coords.append(coord) else: if(coord.z >= z_min_lip and coord.z <= z_max_lip): new_prot_coords.append(coord) protein_coordinates_list = new_prot_coords all_coords_list = [] for coord in lipid_coordinates_list: all_coords_list.append(coord) for coord in protein_coordinates_list: all_coords_list.append(coord) min_x = min(ALL_LIPIDS, key= lambda x: x.xcoord).xcoord min_y = min(ALL_LIPIDS, key= lambda x: x.ycoord).ycoord min_z = min(all_coords_list, key= lambda x: x.z).z max_x = max(ALL_LIPIDS, key= lambda x: x.xcoord).xcoord max_y = max(ALL_LIPIDS, key= lambda x: x.ycoord).ycoord max_z = max(all_coords_list, key= lambda x: x.z).z num_partX = math.ceil((max_x - min_x)/box_side_length) 
num_partY = math.ceil((max_y - min_y)/box_side_length) protein_cluster_grid = np.zeros((num_partX, num_partY)) lipid_cluster_grid = np.zeros((num_partX, num_partY)) for coord in lipid_coordinates_list: x_ind = math.floor(coord.x / box_side_length) y_ind = math.floor(coord.y / box_side_length) lipid_cluster_grid[x_ind][y_ind] += 1 for coord in protein_coordinates_list: x_ind = math.floor(coord.x / box_side_length) y_ind = math.floor(coord.y / box_side_length) protein_cluster_grid[x_ind][y_ind] += 10 protein_grid_x = [] protein_grid_y = [] x = 0 y = 0 for i in protein_cluster_grid: y = 0 for j in i: if j!= 0: protein_grid_x.append(x) protein_grid_y.append(y) y += 1 x += 1 lipid_cluster_grid = np.array(lipid_cluster_grid) protein_cluster_grid = np.array(protein_cluster_grid) cols = { 1:
#
# Copyright 2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, filter

import math
import cmath

#
# An analog deemphasis filter:
#
#               R
# o------/\/\/\/---+----o
#                  |
#                 = C
#                  |
#                 ---
#
# Has this transfer function:
#
#            1             1
#           ----          ---
#            RC           tau
# H(s) = ----------  =  ----------
#              1              1
#        s + ----        s + ---
#              RC             tau
#
# And has its -3 dB response, due to the pole, at
#
# |H(j w_c)|^2 = 1/2  =>  s = j w_c = j (1/(RC))
#
# Historically, this corner frequency of analog audio deemphasis filters
# been specified by the RC time constant used, called tau.
# So w_c = 1/tau.
#
# FWIW, for standard tau values, some standard analog components would be:
# tau = 75 us = (50K)(1.5 nF) = (50 ohms)(1.5 uF)
# tau = 50 us = (50K)(1.0 nF) = (50 ohms)(1.0 uF)
#
# In specifying tau for this digital deemphasis filter, tau specifies
# the *digital* corner frequency, w_c, desired.
#
# The digital deemphasis filter design below, uses the
# "bilinear transformation" method of designing digital filters:
#
# 1. Convert digital specifications into the analog domain, by prewarping
#    digital frequency specifications into analog frequencies.
#
#    w_a = (2/T)tan(wT/2)
#
# 2. Use an analog filter design technique to design the filter.
#
# 3. Use the bilinear transformation to convert the analog filter design to a
#    digital filter design.
#
#    H(z) = H(s)|
#                s = (2/T)(1-z^-1)/(1+z^-1)
#
# which, applied to H(s) = w_ca / (s + w_ca) with k = -w_ca/(2 fs), yields
#
#             -k            1 + z^-1
#    H(z) = ------- * ---------------------
#            1 - k          1 + k
#                      1 - ------- z^-1
#                           1 - k
#
# We use this design technique, because it is an easy way to obtain a filter
# design with the -6 dB/octave roll-off required of the deemphasis filter.
#
# Jackson, L. B., _Digital_Filters_and_Signal_Processing_Second_Edition_,
# Kluwer Academic Publishers, 1989, pp 201-212
#
# Orfanidis, S. J., _Introduction_to_Signal_Processing_, Prentice Hall,
# 1996, pp 573-583
#
class fm_deemph(gr.hier_block2):
    """
    FM Deemphasis IIR filter.

    Single-pole digital IIR filter obtained by applying the bilinear
    transformation to the analog deemphasis network H(s) = w_ca / (s + w_ca).
    Unity gain (0 dB) at DC.
    """

    def __init__(self, fs, tau=75e-6):
        """
        Args:
            fs: sampling frequency in Hz (float)
            tau: Time constant in seconds (75us in US, 50us in EUR) (float)
        """
        gr.hier_block2.__init__(
            self,
            "fm_deemph",
            gr.io_signature(1, 1, gr.sizeof_float),  # Input signature
            gr.io_signature(1, 1, gr.sizeof_float),  # Output signature
        )

        # Digital corner frequency
        w_c = 1.0 / tau

        # Prewarped analog corner frequency
        w_ca = 2.0 * fs * math.tan(w_c / (2.0 * fs))

        # Resulting digital pole, zero, and gain term from the bilinear
        # transformation of H(s) = w_ca / (s + w_ca) to
        # H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1)
        k = -w_ca / (2.0 * fs)
        z1 = -1.0
        p1 = (1.0 + k) / (1.0 - k)
        b0 = -k / (1.0 - k)

        btaps = [b0 * 1.0, b0 * -z1]
        ataps = [1.0, -p1]

        # Since H(s = 0) = 1.0, then H(z = 1) = 1.0 and has 0 dB gain at DC.
        #
        # NOTE(review): removed a dead `if 0:` debug block here.  It used
        # Python 2 `print` statements (a SyntaxError under Python 3 even
        # though unreachable) and referenced `gru`, which is never imported
        # in this module, so it could never have run as written.

        deemph = filter.iir_filter_ffd(btaps, ataps, False)
        self.connect(self, deemph, self)

#
# An analog preemphasis filter, that flattens out again at the high end:
#
#            C
#      +-----||------+
#      |             |
# o----+             +-----+--------o
#      |     R1      |     |
#      +----/\/\/\/--+     \
#                          /
#                          \ R2
#                          /
#                          \
#                          |
# o------------------------+--------o
#
#
(This fine ASCII rendition is based on Figure 5-15 # in "Digital and Analog Communication Systems", <NAME>) # # Has this transfer function: # # 1 # s + --- # R1C # H(s) = ------------------ # 1 R1 # s + --- (1 + --) # R1C R2 # # # It has a corner due to the numerator, where the rise starts, at # # |Hn(j w_cl)|^2 = 2*|Hn(0)|^2 => s = j w_cl = j (1/(R1C)) # # It has a corner due to the denominator, where it levels off again, at # # |Hn(j w_ch)|^2 = 1/2*|Hd(0)|^2 => s = j w_ch = j (1/(R1C) * (1 + R1/R2)) # # Historically, the corner frequency of analog audio preemphasis filters # been specified by the R1C time constant used, called tau. # # So # w_cl = 1/tau = 1/R1C; f_cl = 1/(2*pi*tau) = 1/(2*pi*R1*C) # w_ch = 1/tau2 = (1+R1/R2)/R1C; f_ch = 1/(2*pi*tau2) = (1+R1/R2)/(2*pi*R1*C) # # and note f_ch = f_cl * (1 + R1/R2). # # For broadcast FM audio, tau is 75us in the United States and 50us in Europe. # f_ch should be higher than our digital audio bandwidth. # # The Bode plot looks like this: # # # /---------------- # / # / <-- slope = 20dB/decade # / # -------------/ # f_cl f_ch # # In specifying tau for this digital preemphasis filter, tau specifies # the *digital* corner frequency, w_cl, desired. # # The digital preemphasis filter design below, uses the # "bilinear transformation" method of designing digital filters: # # 1. Convert digital specifications into the analog domain, by prewarping # digital frequency specifications into analog frequencies. # # w_a = (2/T)tan(wT/2) # # 2. Use an analog filter design technique to design the filter. # # 3. Use the bilinear transformation to convert the analog filter design to a # digital filter design. 
# # H(z) = H(s)| # s = (2/T)(1-z^-1)/(1+z^-1) # # # -w_cla # 1 + ------ # 2 fs # 1 - ------------ z^-1 # -w_cla -w_cla # 1 - ------ 1 - ------ # 2 fs 2 fs # H(z) = ------------ * ----------------------- # -w_cha -w_cha # 1 - ------ 1 + ------ # 2 fs 2 fs # 1 - ------------ z^-1 # -w_cha # 1 - ------ # 2 fs # # We use this design technique, because it is an easy way to obtain a filter # design with the 6 dB/octave rise required of the premphasis filter. # # Jackson, <NAME>., _Digital_Filters_and_Signal_Processing_Second_Edition_, # Kluwer Academic Publishers, 1989, pp 201-212 # # Orfanidis, <NAME>., _Introduction_to_Signal_Processing_, Prentice Hall, # 1996, pp 573-583 # class fm_preemph(gr.hier_block2): """ FM Preemphasis IIR filter. """ def __init__(self, fs, tau=75e-6, fh=-1.0): """ Args: fs: sampling frequency in Hz (float) tau: Time constant in seconds (75us in US, 50us in EUR) (float) fh: High frequency at which to flatten out (< 0 means default of 0.925*fs/2.0) (float) """ gr.hier_block2.__init__(self, "fm_preemph", gr.io_signature(1, 1, gr.sizeof_float), # Input signature gr.io_signature(1, 1, gr.sizeof_float)) # Output signature # Set fh to something sensible, if needed. # N.B. fh == fs/2.0 or fh == 0.0 results in a pole on the unit circle # at z = -1.0 or z = 1.0 respectively. That makes the filter unstable # and useless. if fh <= 0.0 or fh >= fs/2.0: fh = 0.925 * fs/2.0 # Digital corner frequencies w_cl = 1.0 / tau w_ch = 2.0 * math.pi * fh # Prewarped analog corner frequencies w_cla = 2.0 * fs * math.tan(w_cl / (2.0 * fs)) w_cha = 2.0 * fs * math.tan(w_ch / (2.0 * fs)) # Resulting digital pole, zero, and gain term from the bilinear # transformation of H(s) = (s + w_cla) / (s + w_cha) to # H(z) = b0 (1 - z1 z^-1)/(1 - p1 z^-1) kl = -w_cla / (2.0 * fs) kh = -w_cha / (2.0 * fs) z1 = (1.0 + kl) / (1.0 - kl) p1 = (1.0 + kh) / (1.0 - kh) b0 = (1.0 - kl) / (1.0 - kh) # Since H(s = infinity) = 1.0, then H(z = -1) = 1.0 and # this filter has 0 dB gain at fs/2.0. 
# That isn't what users are going to expect, so adjust with a # gain, g, so that H(z = 1) = 1.0 for 0 dB gain at DC. w_0dB = 2.0 * math.pi * 0.0 g = abs(1.0 - p1 * cmath.rect(1.0, -w_0dB)) \ / (b0 * abs(1.0 - z1 * cmath.rect(1.0, -w_0dB))) btaps =
"""Unit tests for instrupy.base.
"""
import unittest
import numpy as np
import random
from deepdiff import DeepDiff
from instrupy import InstrumentModelFactory, Instrument
from instrupy.basic_sensor_model import BasicSensorModel
from instrupy.passive_optical_scanner_model import PassiveOpticalScannerModel
from instrupy.synthetic_aperture_radar_model import SyntheticApertureRadarModel
from instrupy.radiometer_model import RadiometerModel
from instrupy.util import SphericalGeometry, Orientation, ViewGeometry, Maneuver


class TestInstrumentModelFactory(unittest.TestCase):
    """Tests for the ``InstrumentModelFactory`` registry."""

    class DummyNewInstrument:
        """Minimal stand-in for a user-registered instrument model."""

        def __init__(self, *args, **kwargs):
            pass

        def from_dict(self):
            return TestInstrumentModelFactory.DummyNewInstrument()

    def _check_builtin_models(self, factory):
        """Assert that all built-in instrument models are registered."""
        expected = {
            'Basic Sensor': BasicSensorModel,
            'Passive Optical Scanner': PassiveOpticalScannerModel,
            'Synthetic Aperture Radar': SyntheticApertureRadarModel,
            'Radiometer': RadiometerModel,
        }
        for label, model_cls in expected.items():
            self.assertIn(label, factory._creators)
            self.assertEqual(factory._creators[label], model_cls)

    def test___init__(self):
        # A freshly constructed factory must already know the built-ins.
        self._check_builtin_models(InstrumentModelFactory())

    def test_register_instrument_model(self):
        factory = InstrumentModelFactory()
        factory.register_instrument_model('New Instrument 2021',
                                          TestInstrumentModelFactory.DummyNewInstrument)
        self.assertIn('New Instrument 2021', factory._creators)
        self.assertEqual(factory._creators['New Instrument 2021'],
                         TestInstrumentModelFactory.DummyNewInstrument)
        # Registration of a new model must not disturb the built-ins.
        self._check_builtin_models(factory)

    def test_get_instrument_model(self):
        factory = InstrumentModelFactory()
        # register a dummy instrument model
        factory.register_instrument_model('New Instrument 2021',
                                          TestInstrumentModelFactory.DummyNewInstrument)

        # For each specification dict, the factory must hand back an instance
        # of the matching model class.  (In practice additional instrument
        # specs shall be present in each dictionary.)

        # basic sensor model
        specs = {"@type": 'Basic Sensor'}
        self.assertIsInstance(factory.get_instrument_model(specs), BasicSensorModel)

        # PassiveOpticalScannerModel
        specs = {"@type": 'Passive Optical Scanner', 'scanTechnique': 'PUSHBROOM',
                 "numberDetectorRows": 1, "numberDetectorCols": 500,
                 "fieldOfViewGeometry": {"shape": "rectangular", "angleWidth": 5, "angleHeight": 5}}
        self.assertIsInstance(factory.get_instrument_model(specs), PassiveOpticalScannerModel)

        # SyntheticApertureRadarModel
        specs = {"@type": 'Synthetic Aperture Radar', "minimumPRF": 2000, "maximumPRF": 8000,
                 "operatingFrequency": 9.6e9, "antennaHeight": 5, "antennaWidth": 0.5}
        self.assertIsInstance(factory.get_instrument_model(specs), SyntheticApertureRadarModel)

        # RadiometerModel
        specs = {"@type": 'Radiometer'}
        self.assertIsInstance(factory.get_instrument_model(specs), RadiometerModel)

        # DummyNewInstrument
        specs = {"@type": 'New Instrument 2021'}
        self.assertIsInstance(factory.get_instrument_model(specs),
                              TestInstrumentModelFactory.DummyNewInstrument)
instrument specs shall be present in the dictionary di_model = factory.get_instrument_model(specs) self.assertIsInstance(di_model, TestInstrumentModelFactory.DummyNewInstrument) class TestInstrument(unittest.TestCase): bs1 = Instrument.from_json('{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \ "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \ "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":2.5 }, \ "sceneFieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \ "maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \ "pointingOption": [{"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":2.5, "zRotation":0}, \ {"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":-2.5, "zRotation":0} \ ], \ "numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" \ }') bs2 = Instrument.from_json('{"name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \ "fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \ "maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, \ "mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} \ ], \ "numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" \ }') bs3 = Instrument.from_json('{"name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \ "fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10 }, \ "sceneFieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, \ "maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, \ "pointingOption": [{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":10}, \ {"referenceFrame": "NADIR_POINTING", "convention": 
"SIDE_LOOK", "sideLookAngle":15} \ ], \ "mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, \ {"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, \ { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} \ ], \ "numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" \ }') def test_from_json_basic_sensor(self): # test initialization with no mode specification self.assertEqual(TestInstrument.bs1.name, "Alpha") self.assertEqual(TestInstrument.bs1._id, "bs1") self.assertEqual(TestInstrument.bs1._type, "Basic Sensor") self.assertIsInstance(TestInstrument.bs1, Instrument) self.assertEqual(len(TestInstrument.bs1.mode), 1) self.assertIsInstance(TestInstrument.bs1.mode[0], BasicSensorModel) mode0 = TestInstrument.bs1.mode[0] self.assertEqual(mode0._id, "0") self.assertEqual(mode0.mass, 10) self.assertEqual(mode0.volume, 12.45) self.assertEqual(mode0.dataRate, 40) self.assertEqual(mode0.bitsPerPixel, 8) self.assertEqual(mode0.power, 12) self.assertEqual(mode0.numberDetectorRows, 5) self.assertEqual(mode0.numberDetectorCols, 10) self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}')) self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter":2.5 }})) self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter":5 }})) self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "CIRCULAR", "diameter": 10}')) self.assertEqual(mode0.fieldOfRegard, [ViewGeometry.from_dict({"orientation": {"referenceFrame": "NADIR_POINTING", 
"convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter":15 }})]) self.assertEqual(mode0.pointingOption, [Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":2.5, "zRotation":0}), Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "XYZ", "xRotation":0, "yRotation":-2.5, "zRotation":0})]) # test initialization with single mode specification self.assertEqual(TestInstrument.bs2.name, "Beta") self.assertIsNotNone(TestInstrument.bs2._id) # a random id shall be assigned self.assertEqual(TestInstrument.bs2._type, "Basic Sensor") self.assertIsInstance(TestInstrument.bs2, Instrument) self.assertEqual(len(TestInstrument.bs2.mode), 1) self.assertIsInstance(TestInstrument.bs2.mode[0], BasicSensorModel) mode0 = TestInstrument.bs2.mode[0] self.assertEqual(mode0._id, 101) self.assertEqual(mode0.mass, 10) self.assertEqual(mode0.volume, 12.45) self.assertEqual(mode0.dataRate, 40) self.assertEqual(mode0.bitsPerPixel, 8) self.assertEqual(mode0.power, 12) self.assertEqual(mode0.numberDetectorRows, 5) self.assertEqual(mode0.numberDetectorCols, 10) self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}')) self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "CIRCULAR", "diameter": 5}})) self.assertEqual(mode0.sceneFieldOfView, mode0.fieldOfView) self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "single_ROLL_ONLY", "A_rollMin": 10, "A_rollMax":15}')) self.assertIsNone(mode0.pointingOption) # test initialization with multiple mode specifications self.assertEqual(TestInstrument.bs3.name, "Gamma") self.assertEqual(TestInstrument.bs3._id, "bs3") self.assertEqual(TestInstrument.bs3._type, "Basic Sensor") self.assertIsInstance(TestInstrument.bs3, Instrument) 
self.assertEqual(len(TestInstrument.bs3.mode), 3) self.assertIsInstance(TestInstrument.bs3.mode[0], BasicSensorModel) # mode0 mode0 = TestInstrument.bs3.mode[0] self.assertEqual(mode0._id, 0) self.assertEqual(mode0.mass, 10) self.assertEqual(mode0.volume, 12.45) self.assertEqual(mode0.dataRate, 40) self.assertEqual(mode0.bitsPerPixel, 8) self.assertEqual(mode0.power, 12) self.assertEqual(mode0.numberDetectorRows, 5) self.assertEqual(mode0.numberDetectorCols, 10) self.assertEqual(mode0.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}')) self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}})) self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}})) self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}')) ddiff = DeepDiff(mode0.fieldOfRegard, [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}), ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}) ], significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) self.assertEqual(mode0.pointingOption, [Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":10}), Orientation.from_dict({"referenceFrame": "NADIR_POINTING", "convention": 
"SIDE_LOOK", "sideLookAngle":15})]) # mode1 mode1 = TestInstrument.bs3.mode[1] self.assertEqual(mode1._id, 1) self.assertEqual(mode1.mass, 10) self.assertEqual(mode1.volume, 12.45) self.assertEqual(mode1.dataRate, 40) self.assertEqual(mode1.bitsPerPixel, 8) self.assertEqual(mode1.power, 12) self.assertEqual(mode1.numberDetectorRows, 5) self.assertEqual(mode1.numberDetectorCols, 10) self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}})) self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}})) self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}')) ddiff = DeepDiff(mode0.fieldOfRegard, [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}), ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}) ], significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) self.assertEqual(mode1.pointingOption, mode0.pointingOption) # mode2 mode2 = TestInstrument.bs3.mode[2] self.assertIsNotNone(mode2._id) self.assertEqual(mode2.mass, 10) self.assertEqual(mode2.volume, 12.45) self.assertEqual(mode2.dataRate, 40) self.assertEqual(mode2.bitsPerPixel, 8) self.assertEqual(mode2.power, 12) self.assertEqual(mode2.numberDetectorRows, 5) self.assertEqual(mode2.numberDetectorCols, 10) 
self.assertEqual(mode2.orientation, Orientation.from_json('{"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}')) self.assertEqual(mode0.fieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":0.25, "angleWidth":10}})) self.assertEqual(mode0.sceneFieldOfView, ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10}})) self.assertEqual(mode0.maneuver, Maneuver.from_json('{"maneuverType": "double_roll_only", "A_rollMin": 10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}')) ddiff = DeepDiff(mode0.fieldOfRegard, [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}), ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_Look", "sideLookAngle":-12.5}, "sphericalGeometry":{"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15}}) ], significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) self.assertEqual(mode2.pointingOption, mode0.pointingOption) def test_get_type(self): self.assertEqual(TestInstrument.bs1.get_type(), 'Basic Sensor') self.assertEqual(TestInstrument.bs2.get_type(), 'Basic Sensor') self.assertEqual(TestInstrument.bs3.get_type(), 'Basic Sensor') def test_get_id(self): self.assertEqual(TestInstrument.bs1.get_id(), "bs1") self.assertIsNotNone(TestInstrument.bs2.get_id()) self.assertEqual(TestInstrument.bs3.get_id(), "bs3") def test_get_mode_id(self): #@TODO pass def test_get_mode(self): #@TODO pass def test_get_field_of_regard(self): #@TODO # bs1 # no input mode-id self.assertEqual(TestInstrument.bs1.get_field_of_regard(), 
[ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})]) # input correct mode-id self.assertEqual(TestInstrument.bs1.get_field_of_regard(mode_id="0"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})]) # input incorrect mode-id, should default to first mode self.assertEqual(TestInstrument.bs1.get_field_of_regard(mode_id="abc"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}, "sphericalGeometry": {"shape": "CIRCULAR", "diameter": 15}})]) # bs2 # no input mode-id self.assertEqual(TestInstrument.bs2.get_field_of_regard(), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})]) # input correct mode-id ddiff = DeepDiff(TestInstrument.bs2.get_field_of_regard(mode_id=101), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})], ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}) # input incorrect mode-id, should default to first mode self.assertEqual(TestInstrument.bs2.get_field_of_regard(mode_id="abc"), [ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "rectangular", "angleHeight": 5, "angleWidth":10}})]) # bs3, all modes have the same field of regard # no input mode-id ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard()[0], ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 
12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}), significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard()[1], ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": -12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}), significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) # input correct mode-id ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard(mode_id=0)[0], ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": 12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}), significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) ddiff = DeepDiff(TestInstrument.bs3.get_field_of_regard(mode_id=0)[1], ViewGeometry.from_dict({"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle": -12.5},"sphericalGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":15 }}), significant_digits=7, ignore_numeric_type_changes=True) self.assertEqual(ddiff, {}, msg=ddiff) # input incorrect mode-id, should default to first mode self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id='abc'), TestInstrument.bs3.get_field_of_regard(mode_id=0)) # next mode self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id=1), TestInstrument.bs3.get_field_of_regard(mode_id=0)) # next mode, mode_id = TestInstrument.bs3.mode_id[2] self.assertEqual(TestInstrument.bs3.get_field_of_regard(mode_id=mode_id), TestInstrument.bs3.get_field_of_regard(mode_id=0)) def test_get_field_of_view(self): # bs1 # no input mode-id self.assertEqual(TestInstrument.bs1.get_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": 
"SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}})) # input correct mode-id self.assertEqual(TestInstrument.bs1.get_field_of_view(mode_id="0"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}})) # input incorrect mode-id, should default to first mode self.assertEqual(TestInstrument.bs1.get_field_of_view(mode_id="abc"), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter": 2.5}})) # bs2 # no input mode-id self.assertEqual(TestInstrument.bs2.get_field_of_view(), ViewGeometry.from_dict({"orientation":{"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},"sphericalGeometry": {"shape": "CIRCULAR", "diameter":
# coding: utf-8 # # Known issues: Recentering on resize and when switching between # different image types. Ring centre on image switch. from __future__ import absolute_import, division, print_function import imp import math import os import wx from . import pyslip from . import tile_generation from ..rstbx_frame import EVT_EXTERNAL_UPDATE from ..rstbx_frame import XrayFrame as XFBaseClass from rstbx.viewer import settings as rv_settings, image as rv_image from wxtbx import bitmaps pyslip._Tiles = tile_generation._Tiles class chooser_wrapper(object): def __init__(self, image_set, index): self.image_set = image_set self.path = os.path.basename(image_set.get_path(index)) self.full_path = image_set.get_path(index) self.index = index self._raw_data = None def __str__(self): return "%s [%d]" % (self.path, self.index + 1) def get_detector(self): return self.image_set.get_detector() def get_scan(self): return self.image_set.get_scan() def get_beam(self): return self.image_set.get_beam() def get_mask(self): return self.image_set.get_mask(self.index) def get_raw_data(self): if self._raw_data is None: return self.image_set[self.index] return self._raw_data def set_raw_data(self, raw_data): self._raw_data = raw_data def get_detectorbase(self): return self.image_set.get_detectorbase(self.index) def get_vendortype(self): return self.image_set.get_vendortype(self.index) def show_header(self): return self.image_set.get_detectorbase(self.index).show_header() class XrayFrame(XFBaseClass): def set_pyslip(self, parent): self.pyslip = pyslip.PySlip(parent, tile_dir=None, min_level=0) def __init__(self, *args, **kwds): self.params = kwds.get("params", None) if "params" in kwds: del kwds["params"] # otherwise wx complains ### Collect any plugins slip_viewer_dir = os.path.join(os.path.dirname(__file__)) contents = os.listdir(slip_viewer_dir) plugin_names = [ f.split(".py")[0] for f in contents if f.endswith("_frame_plugin.py") ] self.plugins = {} for name in plugin_names: 
self.plugins[name] = imp.load_source( name, os.path.join(slip_viewer_dir, name + ".py") ) if len(plugin_names) > 0: print("Loaded plugins: " + ", ".join(plugin_names)) wx.Frame.__init__(self, *args, **kwds) self.settings = rv_settings() self.sizer = wx.BoxSizer(wx.VERTICAL) self.SetSizer(self.sizer) # initialization is done in stages as windows are created self.pyslip = None self.viewer = wx.Panel(self, wx.ID_ANY) self.viewer.SetMinSize((640, 640)) self.viewer.SetBackgroundColour(wx.BLACK) self.viewer.ClearBackground() self.sizer.Add(self.viewer, 1, wx.EXPAND) self.statusbar = self.CreateStatusBar() self.settings_frame = None self._calibration_frame = None self._ring_frame = None self._uc_frame = None self._score_frame = None self._plugins_frame = {key: None for key in self.plugins} self.zoom_frame = None self.plot_frame = None self.metrology_matrices = None # Currently displayed image. XXX Can this be zapped? self._img = None self._distl = None self.toolbar = self.CreateToolBar(style=wx.TB_3DBUTTONS | wx.TB_TEXT) self.setup_toolbar() self.toolbar.Realize() self.mb = wx.MenuBar() self.setup_menus() self.SetMenuBar(self.mb) self.Fit() self.SetMinSize(self.GetSize()) self.SetSize((720, 720)) self.Bind(EVT_EXTERNAL_UPDATE, self.OnExternalUpdate) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUICalibration, id=self._id_calibration) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUINext, id=wx.ID_FORWARD) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIPrevious, id=wx.ID_BACKWARD) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIRing, id=self._id_ring) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIUC, id=self._id_uc) self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIScore, id=self._id_score) for p in self.plugins: self.Bind( wx.EVT_UPDATE_UI, self.OnUpdateUIPluginWrapper(p), id=self._id_plugins[p], ) # consolidate initialization of PySlip object into a single function def init_pyslip(self): self.set_pyslip(self.viewer) self.init_pyslip_presizer() def Show(self): # Due to the asynchronous nature of X11 
on Linux, just showing a frame # does not guarantee window creation. The frame calls Raise() so that it # will be shown. This addresses an error with PySlip requiring the # window to exist before instantiation. super(XrayFrame, self).Show() self.Raise() def setup_toolbar(self): XFBaseClass.setup_toolbar(self) btn = self.toolbar.AddLabelTool( id=wx.ID_SAVEAS, label="Save As...", bitmap=bitmaps.fetch_icon_bitmap("actions", "save_all", 32), shortHelp="Save As...", kind=wx.ITEM_NORMAL, ) self.Bind(wx.EVT_MENU, self.OnSaveAs, btn) # using StaticBox creates a horizontal white bar in Linux def make_gui(self, parent): parent.sizer = wx.BoxSizer(wx.HORIZONTAL) parent.SetSizer(parent.sizer) parent.sizer.Add(self.pyslip, 1, wx.EXPAND) def init_pyslip_presizer(self): self.demo_select_dispatch = {} # self.tile_directory = None#"/Users/nksauter/rawdata/demo/insulin_1_001.img" # build the GUI self.make_gui(self.viewer) # finally, bind event to handler self.pyslip.Bind(pyslip.EVT_PYSLIP_POSITION, self.handle_position_event) def handle_position_event(self, event): """Handle a pySlip POSITION event.""" posn_str = "" if event.position: (lon, lat) = event.position fast_picture, slow_picture = self.pyslip.tiles.lon_lat_to_picture_fast_slow( lon, lat ) posn_str = "Picture: slow=%.3f / fast=%.3f pixels." % ( slow_picture, fast_picture, ) coords = self.pyslip.tiles.get_flex_pixel_coordinates(lon, lat) if len(coords) >= 2: if len(coords) == 3: readout = int(round(coords[2])) else: readout = -1 coords_str = "slow=%.3f / fast=%.3f pixels" % (coords[0], coords[1]) if len(coords) == 2: posn_str += " Readout: " + coords_str + "." elif readout >= 0: posn_str += " Readout %d: %s." 
% (readout, coords_str) possible_intensity = None fi = self.pyslip.tiles.raw_image detector = fi.get_detector() ifs = (int(coords[1]), int(coords[0])) # int fast slow isf = (int(coords[0]), int(coords[1])) # int slow fast raw_data = fi.get_raw_data() if not isinstance(raw_data, tuple): raw_data = (raw_data,) if len(detector) > 1: if readout >= 0: if detector[readout].is_coord_valid(ifs): possible_intensity = raw_data[readout][isf] else: if detector[0].is_coord_valid(ifs): possible_intensity = raw_data[0][isf] if possible_intensity is not None: if possible_intensity == 0: format_str = " I=%6.4f" else: yaya = int(math.ceil(math.log10(abs(possible_intensity)))) format_str = " I=%%6.%df" % (max(0, 5 - yaya)) posn_str += format_str % possible_intensity if ( len(coords) > 2 and readout >= 0 ): # indicates it's a tiled image in a valid region reso = self.pyslip.tiles.get_resolution( coords[1], coords[0], readout ) else: reso = self.pyslip.tiles.get_resolution(coords[1], coords[0]) if reso is not None: posn_str += " Resolution: %.3f" % (reso) self.statusbar.SetStatusText(posn_str) else: self.statusbar.SetStatusText( "Click and drag to pan; " + "middle-click and drag to plot intensity profile, right-click to zoom" ) # print "event with no position",event return def init_pyslip_postsizer(self): self.pyslip.ZoomToLevel(-2) # tiles.zoom_level self.pyslip.GotoPosition( self.pyslip.tiles.get_initial_instrument_centering_within_picture_as_lon_lat() ) def setup_menus(self): file_menu = wx.Menu() self.mb.Append(file_menu, "File") item = file_menu.Append(-1, "Open integration results...") self.Bind(wx.EVT_MENU, self.OnLoadIntegration, item) item = file_menu.Append(-1, "Open image...") self.Bind(wx.EVT_MENU, self.OnLoadFile, item) self._actions_menu = wx.Menu() self.mb.Append(self._actions_menu, "Actions") # item = self._actions_menu.Append(-1, "Change beam center...") # self.Bind(wx.EVT_MENU, self.OnChangeBeamCenter, item) # item = self._actions_menu.Append(-1, "Reset beam center to 
header value") # self.Bind(wx.EVT_MENU, lambda evt: self.viewer.ResetBeamCenter(), item) item = self._actions_menu.Append(-1, "Save As...") self.Bind(wx.EVT_MENU, self.OnSaveAs, item) # Known wxWidgets/wxPython issue # (http://trac.wxwidgets.org/ticket/12394): stock item ID is # expected for zero-length text. Work around by making text # contain single space. XXX Placement self._id_calibration = wx.NewId() item = self._actions_menu.Append(self._id_calibration, " ") self.Bind(wx.EVT_MENU, self.OnCalibration, source=item) # XXX Placement self._id_ring = wx.NewId() item = self._actions_menu.Append(self._id_ring, " ") self.Bind(wx.EVT_MENU, self.OnRing, source=item) # XXX Placement self._id_uc = wx.NewId() item = self._actions_menu.Append(self._id_uc, " ") self.Bind(wx.EVT_MENU, self.OnUC, source=item) # XXX Placement self._id_score = wx.NewId() item = self._actions_menu.Append(self._id_score, " ") self.Bind(wx.EVT_MENU, self.OnScore, source=item) self._id_plugins = {} for p in self.plugins: self._id_plugins[p] = wx.NewId() item = self._actions_menu.Append(self._id_plugins[p], " ") self.Bind(wx.EVT_MENU, self.OnPluginWrapper(p), source=item) def has_four_quadrants(self): d = self.pyslip.tiles.raw_image.get_detector() return len(d) > 1 and len(d.hierarchy()) == 4 def add_file_name_or_data(self, file_name_or_data): """The add_file_name_or_data() function appends @p file_name_or_data to the image chooser, unless it is already present. For file-backed images, the base name is displayed in the chooser. If necessary, the number of entries in the chooser is pruned. The function returns the index of the recently added entry. XXX This is probably the place for heuristics to determine if the viewer was given a pattern, or a plain list of files. XXX Rename this function, because it only deals with the chooser? 
""" key = self.get_key(file_name_or_data) for i in range(self.image_chooser.GetCount()): if key == str(self.image_chooser.GetClientData(i)): return i if self.image_chooser.GetCount() >= self.CHOOSER_SIZE: self.image_chooser.Delete(0) i = self.image_chooser.GetCount() if type(file_name_or_data) is dict: self.image_chooser.Insert(key, i, None) elif isinstance(file_name_or_data, chooser_wrapper): self.image_chooser.Insert(key, i, file_name_or_data) else: self.image_chooser.Insert(os.path.basename(key), i, key) return i def get_beam_center_px(self): """ Get the beam center in pixel coordinates relative to the tile closest to it. @return panel_id, beam_center_fast, beam_center_slow. panel_id is the panel the returned coordinates are relative to. """ detector = self.get_detector() beam = self.get_beam() if abs(detector[0].get_distance()) == 0: return 0.0, 0.0 # FIXME assumes all detector elements use the same millimeter-to-pixel convention try: # determine if the beam intersects one of the panels panel_id, (x_mm, y_mm) = detector.get_ray_intersection(beam.get_s0()) except RuntimeError as e: if not ("DXTBX_ASSERT(" in str(e) and ") failure" in str(e)): # unknown exception from dxtbx raise e if len(detector) > 1: # find the panel whose center is closest to the beam. panel_id = 0 lowest_res = 0 for p_id, panel in enumerate(detector): w, h = panel.get_image_size() res = panel.get_resolution_at_pixel(beam.get_s0(), (w // 2, h // 2)) if res > lowest_res: panel_id = p_id lowest_res = res x_mm, y_mm = detector[panel_id].get_beam_centre(beam.get_s0()) else: panel_id = 0 # FIXME this is horrible but cannot find easier way without # restructuring code - N.B. case I am debugging now is one # panel detector *parallel to beam* for which the question is # ill posed. 
try: x_mm, y_mm = detector[0].get_beam_centre(beam.get_s0()) except RuntimeError as e: if "DXTBX_ASSERT" in str(e): x_mm, y_mm = 0.0, 0.0 else: raise e beam_pixel_fast, beam_pixel_slow = detector[panel_id].millimeter_to_pixel( (x_mm, y_mm) ) return panel_id, beam_pixel_fast, beam_pixel_slow def load_image(self, file_name_or_data, get_raw_data=None, show_untrusted=False): """The load_image() function displays the image from @p file_name_or_data. The chooser is updated appropriately. """ # Due to a bug in wxPython 3.0.2 for Linux # http://trac.wxwidgets.org/ticket/16034 # the creation of the PySlip object is deferred until it is needed and # after other
want to change the documentation of an endpoint, you can either use the update_docs() function or the online admin interface at https://admin.sclble.net. Args: path: The path referencing the onnx model location (i.e., the .onnx file location). cfid: a string with a valid computeFunction ID. example: String example input for the onnx file. docs: A dict{} containing the fields 'name' and 'documentation'. email: Bool indicating whether a confirmation email of a successful conversion should be send. Default True. Returns: False if upload failed, true otherwise Raises (in debug mode): UploadModelError if unable to successfully bundle and upload the model. """ if glob.DEBUG: print("We are checking your updated .onnx file...") bundle = {} # simply add example string to bundle: bundle['example'] = example # check the docs: bundle['docs'] = {} if docs: bundle['docs'] = docs else: try: # This should work, but catching the exception just in case: name = path except Exception: name = "NAMELESS MODEL" bundle['docs']['name'] = name bundle['docs']['documentation'] = "-- EMPTY --" if not glob.SILENT: print("WARNING: You did not provide any documentation. \n" "We will simply use " + name + " as its name without further documentation.") # check if file exists: if not path.endswith('.onnx'): if not glob.SILENT: print("FATAL: You did not specify a .onnx path. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We were unable to open the specified onnx file (no .onnx extension).") # try to open the file try: files = [('bundle', open(path, 'rb'))] except Exception: if not glob.SILENT: print("FATAL: We were unable to open the specified onnx file. Is the path correct? \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We were unable to open the specified onnx file.") return False # check authorization: auth = _check_jwt() if not auth: if not glob.SILENT: print("FATAL: We were unable to obtain JWT authorization for your account. 
\n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We were unable to obtain JWT authorization.") return False # get system information bundle['system_info'] = _get_system_info() # all ok, upload: if auth: url = glob.TOOLCHAIN_URL + "/upload/" + glob.JWT_USER_ID + "/" + cfid # Hard coded tootlchain name: toolchain_name = "onnx2c" # Setup the actual request data: dict = { 'email': email, 'package': __version__, 'toolchain': toolchain_name, 'name': bundle['docs'].get('name', "No name found."), 'docs': bundle['docs'].get('documentation', "No docs provided."), 'exampleInput': example, 'exampleOutput': "" } payload: dict = { 'data': json.dumps(data) } headers = { 'Authorization': glob.JWT_TOKEN, } try: response = requests.put(url, headers=headers, data=payload, files=files) except Exception as e: if not glob.SILENT: print("FATAL: Unable to carry out the update request: the toolchain is not available. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We were unable to obtain JWT authorization: " + str(e)) return False # Error handling try: response_data = json.loads(response.text) except Exception as e: if not glob.SILENT: print("FATAL: We did not receive a valid JSON response from the toolchain-server. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We did not receive a valid response from the server: " + str(e)) return False if response_data['error']: if not glob.SILENT: print("FATAL: An error was returned by the toolchain-server. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We did not receive a valid response from the server: " + response_data['error']) return False if glob.DEBUG: print("The following content has been send to the toolchain server:") print(bundle) # user feedback: if not glob.SILENT: print("Your model was successfully submitted for an update. 
\n") return True else: return False # update_sklearn updates an sklearn model def update_sklearn(mod, feature_vector, cfid, docs={}, email=True, _keep=False) -> bool: """Updates an already existing sklearn model. The update function is similar to the upload function in most respects (so, see its docs), but instead of creating a new endpoint it overwrites the docs and model of an already existing endpoint. Thus, there is an additional argument "cfid" providing the computeFunction Id of the endpoint that needs to be updated. Use sp.list_models() to get a list of your already existing endpoints. Note: If you solely want to change the documentation of an endpoint, you can either use the update_docs() function or the online admin interface at https://admin.sclble.net. Args: mod: The model to be uploaded. feature_vector: An example feature_vector for your model. (i.e., the first row of your training data X obtained using row = X[0,:]) cfid: a string with a valid computeFunction ID. docs: A dict{} containing the fields 'name' and 'documentation'. Default {}. email: Bool indicating whether a confirmation email of a successful conversion should be send. Default True. _keep: Bool indicating whether the .gzipped file should be retained. Default False. Returns: False if upload failed, true otherwise Raises (in debug mode): UploadModelError if unable to successfully bundle and upload the model. """ if glob.DEBUG: print("We are checking your updated sklearn model...") bundle = {} # Check model: if not _check_model(mod): if not glob.SILENT: print("FATAL: The model you are trying to upload is not (yet) supported or has not been fitted. \n" "Please see README.md for a list of supported models. \n" "Your models has not been uploaded. 
\n") if glob.DEBUG: raise UploadModelError("The submitted model is not supported.") return False bundle['fitted_model'] = mod # check input vector and generate an example: bundle['example'] = {} if feature_vector.any(): # try prediction try: output = _predict(mod, feature_vector) # predict raises error if unable to generate predcition except Exception as e: if not glob.SILENT: print("FATAL: We were unable to create an example inference. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("Unable to generate prediction: " + str(e)) return False # format data: input_str: str = '[[%s]]' % ', '.join([str(i) for i in feature_vector.tolist()]) example = {'input': input_str, "output": json.dumps(output)} bundle['example'] = example else: if not glob.SILENT: print("FATAL: You did not provide a required example instance. (see docs). \n" "Your model has not been updated. \n") return False # check the docs: bundle['docs'] = {} docs_update = True if docs: bundle['docs'] = docs else: docs_update = False if not glob.SILENT: print("WARNING: You did not provide any documentation. \n" "We will only update the .wasm model, not the documentation.") # check authorization: auth = _check_jwt() if not auth: if not glob.SILENT: print("FATAL: We were unable to obtain JWT authorization for your account. \n" "Your model has not been updated. \n") if glob.DEBUG: raise UploadModelError("We were unable to obtain JWT authorization.") return False # get system information bundle['system_info'] = _get_system_info() # gzip the bundle if not _gzip_save(bundle): if not glob.SILENT: print("FATAL: We were unable to gzip your model bundle. \n" "Your model has not been updated. 
\n") if glob.DEBUG: raise UploadModelError("We were unable to save the bundle.") return False # all ok, upload: if auth: url = glob.TOOLCHAIN_URL + "/upload/" + glob.JWT_USER_ID + "/" + cfid # Map python package to the right toolchain: pkg_name = _get_model_package(mod) toolchain_name = "" if pkg_name == "sklearn": toolchain_name = "sklearn" elif pkg_name == "statsmodels": toolchain_name = "sklearn" elif pkg_name == "xgboost": toolchain_name = "sklearn" elif pkg_name == "lightgbm": toolchain_name = "sklearn" # Setup the actual request data: dict = { 'email': email, 'package': __version__, 'toolchain': toolchain_name, 'name': bundle['docs'].get('name', ""), 'docs': bundle['docs'].get('documentation', ""), 'updateDocs': docs_update, 'exampleInput': bundle['example'].get('input', "[]"), 'exampleOutput': bundle['example'].get('output', "[]") } payload: dict = { 'data': json.dumps(data) } files = [('bundle', open(glob.GZIP_BUNDLE, 'rb'))] headers = { 'Authorization': glob.JWT_TOKEN } # Do the request (and make sure to delete the gzip if errors occur) try: response = requests.put(url, headers=headers, data=payload, files=files) except Exception as e: if not glob.SILENT: print("FATAL: Unable to carry out the upload request: the toolchain is not available. \n" "Your model has not been updated. \n") if not _keep: _gzip_delete() if glob.DEBUG: raise UploadModelError("We were unable to obtain JWT authorization: " + str(e)) return False # Error handling try: response_data = json.loads(response.text) except Exception as
listener.enterForStatement(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitForStatement" ): listener.exitForStatement(self) class RetStatementContext(GenStatementContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenStatementContext super().__init__(parser) self.copyFrom(ctx) def RET(self): return self.getToken(EvansParser.RET, 0) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterRetStatement" ): listener.enterRetStatement(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitRetStatement" ): listener.exitRetStatement(self) def genStatement(self): localctx = EvansParser.GenStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 54, self.RULE_genStatement) self._la = 0 # Token type try: self.state = 355 self._errHandler.sync(self) token = self._input.LA(1) if token in [EvansParser.IF]: localctx = EvansParser.IfStatementContext(self, localctx) self.enterOuterAlt(localctx, 1) self.state = 311 self.match(EvansParser.IF) self.state = 312 self.match(EvansParser.T__3) self.state = 313 self.genExpression(0) self.state = 314 self.match(EvansParser.T__4) self.state = 315 self.genCodeBlock() self.state = 324 self._errHandler.sync(self) _la = self._input.LA(1) while _la==EvansParser.ELIF: self.state = 316 self.match(EvansParser.ELIF) self.state = 317 self.match(EvansParser.T__3) self.state = 318 self.genExpression(0) self.state = 319 self.match(EvansParser.T__4) self.state = 320 self.genCodeBlock() self.state = 326 self._errHandler.sync(self) _la = self._input.LA(1) self.state = 329 self._errHandler.sync(self) _la = self._input.LA(1) if _la==EvansParser.ELSE: self.state = 327 self.match(EvansParser.ELSE) self.state = 328 self.genCodeBlock() pass elif token in [EvansParser.WHILE]: localctx = EvansParser.WhileStatementContext(self, localctx) 
self.enterOuterAlt(localctx, 2) self.state = 331 self.match(EvansParser.WHILE) self.state = 332 self.match(EvansParser.T__3) self.state = 333 self.genExpression(0) self.state = 334 self.match(EvansParser.T__4) self.state = 335 self.genCodeBlock() pass elif token in [EvansParser.FOR]: localctx = EvansParser.ForStatementContext(self, localctx) self.enterOuterAlt(localctx, 3) self.state = 337 self.match(EvansParser.FOR) self.state = 338 self.match(EvansParser.T__3) self.state = 339 self.nameList() self.state = 340 self.match(EvansParser.IN) self.state = 341 self.genExpression(0) self.state = 342 self.match(EvansParser.T__4) self.state = 343 self.genCodeBlock() pass elif token in [EvansParser.RET]: localctx = EvansParser.RetStatementContext(self, localctx) self.enterOuterAlt(localctx, 4) self.state = 345 self.match(EvansParser.RET) self.state = 347 self._errHandler.sync(self) _la = self._input.LA(1) if ((((_la - 4)) & ~0x3f) == 0 and ((1 << (_la - 4)) & ((1 << (EvansParser.T__3 - 4)) | (1 << (EvansParser.T__16 - 4)) | (1 << (EvansParser.STRING_LITERAL - 4)) | (1 << (EvansParser.DECIMAL_LITERAL - 4)) | (1 << (EvansParser.FLOAT_LITERAL - 4)) | (1 << (EvansParser.BOOL_LITERAL - 4)) | (1 << (EvansParser.LIST - 4)) | (1 << (EvansParser.BOOL - 4)) | (1 << (EvansParser.STR - 4)) | (1 << (EvansParser.FLOAT - 4)) | (1 << (EvansParser.INT - 4)) | (1 << (EvansParser.NUM - 4)) | (1 << (EvansParser.VAR - 4)) | (1 << (EvansParser.ADD - 4)) | (1 << (EvansParser.SUB - 4)) | (1 << (EvansParser.ID - 4)))) != 0): self.state = 346 self.genExpression(0) self.state = 349 self.match(EvansParser.T__9) pass elif token in [EvansParser.BREAK, EvansParser.CONT]: localctx = EvansParser.BreakContStatementContext(self, localctx) self.enterOuterAlt(localctx, 5) self.state = 350 _la = self._input.LA(1) if not(_la==EvansParser.BREAK or _la==EvansParser.CONT): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() self.state = 351 self.match(EvansParser.T__9) pass 
elif token in [EvansParser.T__3, EvansParser.T__16, EvansParser.STRING_LITERAL, EvansParser.DECIMAL_LITERAL, EvansParser.FLOAT_LITERAL, EvansParser.BOOL_LITERAL, EvansParser.LIST, EvansParser.BOOL, EvansParser.STR, EvansParser.FLOAT, EvansParser.INT, EvansParser.NUM, EvansParser.VAR, EvansParser.ADD, EvansParser.SUB, EvansParser.ID]: localctx = EvansParser.ExpressionStatementContext(self, localctx) self.enterOuterAlt(localctx, 6) self.state = 352 self.genExpression(0) self.state = 353 self.match(EvansParser.T__9) pass else: raise NoViableAltException(self) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class AssignmentStatementContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def ID(self, i:int=None): if i is None: return self.getTokens(EvansParser.ID) else: return self.getToken(EvansParser.ID, i) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def getRuleIndex(self): return EvansParser.RULE_assignmentStatement def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterAssignmentStatement" ): listener.enterAssignmentStatement(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitAssignmentStatement" ): listener.exitAssignmentStatement(self) def assignmentStatement(self): localctx = EvansParser.AssignmentStatementContext(self, self._ctx, self.state) self.enterRule(localctx, 56, self.RULE_assignmentStatement) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 357 self.match(EvansParser.ID) self.state = 362 self._errHandler.sync(self) _la = self._input.LA(1) while _la==EvansParser.T__10: self.state = 358 self.match(EvansParser.T__10) self.state = 359 self.match(EvansParser.ID) self.state = 364 
self._errHandler.sync(self) _la = self._input.LA(1) self.state = 365 _la = self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << EvansParser.T__8) | (1 << EvansParser.T__11) | (1 << EvansParser.T__12) | (1 << EvansParser.T__13) | (1 << EvansParser.T__14) | (1 << EvansParser.T__15))) != 0)): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() self.state = 366 self.genExpression(0) self.state = 367 self.match(EvansParser.T__9) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class GenExpressionContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def getRuleIndex(self): return EvansParser.RULE_genExpression def copyFrom(self, ctx:ParserRuleContext): super().copyFrom(ctx) class TernaryExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterTernaryExpression" ): listener.enterTernaryExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitTernaryExpression" ): listener.exitTernaryExpression(self) class LiteralExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genLiteral(self): return self.getTypedRuleContext(EvansParser.GenLiteralContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, 
"enterLiteralExpression" ): listener.enterLiteralExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitLiteralExpression" ): listener.exitLiteralExpression(self) class VarExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def ID(self): return self.getToken(EvansParser.ID, 0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterVarExpression" ): listener.enterVarExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitVarExpression" ): listener.exitVarExpression(self) class EqualExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.op = None # Token self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def NE(self): return self.getToken(EvansParser.NE, 0) def EQ(self): return self.getToken(EvansParser.EQ, 0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterEqualExpression" ): listener.enterEqualExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitEqualExpression" ): listener.exitEqualExpression(self) class IndexExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterIndexExpression" ): listener.enterIndexExpression(self) 
def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitIndexExpression" ): listener.exitIndexExpression(self) class NotExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterNotExpression" ): listener.enterNotExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitNotExpression" ): listener.exitNotExpression(self) class CompareExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.op = None # Token self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def LT(self): return self.getToken(EvansParser.LT, 0) def GT(self): return self.getToken(EvansParser.GT, 0) def LE(self): return self.getToken(EvansParser.LE, 0) def GE(self): return self.getToken(EvansParser.GE, 0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterCompareExpression" ): listener.enterCompareExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitCompareExpression" ): listener.exitCompareExpression(self) class OrExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def enterRule(self, listener:ParseTreeListener): if 
hasattr( listener, "enterOrExpression" ): listener.enterOrExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitOrExpression" ): listener.exitOrExpression(self) class ParensExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterParensExpression" ): listener.enterParensExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitParensExpression" ): listener.exitParensExpression(self) class AddSubExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.op = None # Token self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def ADD(self): return self.getToken(EvansParser.ADD, 0) def SUB(self): return self.getToken(EvansParser.SUB, 0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterAddSubExpression" ): listener.enterAddSubExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitAddSubExpression" ): listener.exitAddSubExpression(self) class AndExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, 
"enterAndExpression" ): listener.enterAndExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitAndExpression" ): listener.exitAndExpression(self) class PrefixExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.prefix = None # Token self.copyFrom(ctx) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def ADD(self): return self.getToken(EvansParser.ADD, 0) def SUB(self): return self.getToken(EvansParser.SUB, 0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterPrefixExpression" ): listener.enterPrefixExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitPrefixExpression" ): listener.exitPrefixExpression(self) class AttrExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def genExpression(self): return self.getTypedRuleContext(EvansParser.GenExpressionContext,0) def ID(self): return self.getToken(EvansParser.ID, 0) def methodCall(self): return self.getTypedRuleContext(EvansParser.MethodCallContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterAttrExpression" ): listener.enterAttrExpression(self) def exitRule(self, listener:ParseTreeListener): if hasattr( listener, "exitAttrExpression" ): listener.exitAttrExpression(self) class CallExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.copyFrom(ctx) def methodCall(self): return self.getTypedRuleContext(EvansParser.MethodCallContext,0) def enterRule(self, listener:ParseTreeListener): if hasattr( listener, "enterCallExpression" ): listener.enterCallExpression(self) def exitRule(self, 
listener:ParseTreeListener): if hasattr( listener, "exitCallExpression" ): listener.exitCallExpression(self) class MulDivExpressionContext(GenExpressionContext): def __init__(self, parser, ctx:ParserRuleContext): # actually a EvansParser.GenExpressionContext super().__init__(parser) self.op = None # Token self.copyFrom(ctx) def genExpression(self, i:int=None): if i is None: return self.getTypedRuleContexts(EvansParser.GenExpressionContext) else: return self.getTypedRuleContext(EvansParser.GenExpressionContext,i)
<<<<<<< HEAD """ Nomenclature: Host machine Machine on which this pexpect script is run. Container Container created to run the modules on. container_child - pexpect-spawned child created to create the container host_child - pexpect spawned child living on the host container """ #The MIT License (MIT) # #Copyright (C) 2014 OpenBet Limited # #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software without restriction, including without limitation the rights to #use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #of the Software, and to permit persons to whom the Software is furnished to do #so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #ITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. from shutit_module import ShutItModule import pexpect import sys import util import time import re import subprocess from distutils import spawn class conn_docker(ShutItModule): """Connects ShutIt to docker daemon and starts the container. """ def is_installed(self,shutit): """Always considered false for ShutIt setup. """ return False def _check_docker(self, shutit): # Do some docker capability checking cfg = shutit.cfg cp = cfg['config_parser'] # If we have sudo, kill any current sudo timeout. This is a bit of a # hammer and somewhat unfriendly, but tells us if we need a password. 
if spawn.find_executable('sudo') is not None: if subprocess.call(['sudo', '-k']) != 0: shutit.fail('Couldn\'t kill sudo timeout') # Check the executable is in the path. Not robust (as it could be sudo) # but deals with the common case of 'docker.io' being wrong. docker = cfg['host']['docker_executable'].split(' ') if spawn.find_executable(docker[0]) is None: msg = ('Didn\'t find %s on the path, what is the ' + 'executable name (or full path) of docker?') % (docker[0],) cfg['host']['docker_executable'] = shutit.prompt_cfg(msg, 'host', 'docker_executable') return False # First check we actually have docker and password (if needed) works check_cmd = docker + ['--version'] str_cmd = ' '.join(check_cmd) cmd_timeout = 10 needed_password = False fail_msg = '' try: shutit.log('Running: ' + str_cmd,force_stdout=True,prefix=False) child = pexpect.spawn(check_cmd[0], check_cmd[1:], timeout=cmd_timeout) except pexpect.ExceptionPexpect: msg = ('Failed to run %s (not sure why this has happened)...try ' + 'a different docker executable?') % (str_cmd,) cfg['host']['docker_executable'] = shutit.prompt_cfg(msg, 'host', 'docker_executable') return False try: if child.expect(['assword', pexpect.EOF]) == 0: needed_password = True if cfg['host']['password'] == '': msg = ('Running "%s" has prompted for a password, please ' + 'enter your host password') % (str_cmd,) cfg['host']['password'] = shutit.prompt_cfg(msg, 'host', 'password', ispass=True) child.sendline(cfg['host']['password']) child.expect(pexpect.EOF) except pexpect.ExceptionPexpect: fail_msg = '"%s" did not complete in %ss' % (str_cmd, cmd_timeout) child.close() if child.exitstatus != 0: fail_msg = '"%s" didn\'t return a 0 exit code' % (str_cmd,) if fail_msg: # TODO: Ideally here we'd split up our checks so if it asked for a # password, kill the sudo timeout and run `sudo -l`. We then know if # the password is right or not so we know what we need to prompt # for. At the moment we assume the password if it was asked for. 
if needed_password: msg = (fail_msg + ', your host password or ' + 'docker_executable config may be wrong (I will assume ' + 'password).\nPlease confirm your host password.') sec, name, ispass = 'host', 'password', True else: msg = (fail_msg + ', your docker_executable ' + 'setting seems to be wrong.\nPlease confirm your host ' + 'password.') sec, name, ispass = 'host', 'docker_executable', False cfg[sec][name] = shutit.prompt_cfg(msg, sec, name, ispass=ispass) return False # Now check connectivity to the docker daemon check_cmd = docker + ['info'] str_cmd = ' '.join(check_cmd) child = pexpect.spawn(check_cmd[0], check_cmd[1:], timeout=cmd_timeout) try: if child.expect(['assword', pexpect.EOF]) == 0: child.sendline(cfg['host']['password']) child.expect(pexpect.EOF) except pexpect.ExceptionPexpect: shutit.fail('"' + str_cmd + '" did not complete in ' + str(cmd_timeout) + 's, ' + 'is the docker daemon overloaded?') child.close() if child.exitstatus != 0: msg = ('"' + str_cmd + '" didn\'t return a 0 exit code, is the ' + 'docker daemon running? Do you need to set the ' + 'docker_executable config to use sudo? Please confirm the ' + 'docker executable.') cfg['host']['docker_executable'] = shutit.prompt_cfg(msg, 'host', 'docker_executable') return True def build(self,shutit): """Sets up the container ready for building. """ # Uncomment for testing for "failure" cases. #sys.exit(1) while not self._check_docker(shutit): pass cfg = shutit.cfg docker = cfg['host']['docker_executable'].split(' ') # Always-required options cfg['build']['cidfile'] = '/tmp/' + cfg['host']['username'] + '_cidfile_' + cfg['build']['build_id'] cidfile_arg = '--cidfile=' + cfg['build']['cidfile'] # Singly-specified options privileged_arg = '' lxc_conf_arg = '' name_arg = '' hostname_arg = '' volume_arg = '' rm_arg = '' if cfg['build']['privileged']: privileged_arg = '--privileged=true' else: # TODO: put in to ensure serve always works. 
# Need better solution in place, eg refresh builder when build needs privileged privileged_arg = '--privileged=true' if cfg['build']['lxc_conf'] != '': lxc_conf_arg = '--lxc-conf=' + cfg['build']['lxc_conf'] if cfg['container']['name'] != '': name_arg = '--name=' + cfg['container']['name'] if cfg['container']['hostname'] != '': hostname_arg = '-h=' + cfg['container']['hostname'] if cfg['host']['resources_dir'] != '': volume_arg = '-v=' + cfg['host']['resources_dir'] + ':/resources' # Incompatible with do_repository_work if cfg['container']['rm']: rm_arg = '--rm=true' # Multiply-specified options port_args = [] dns_args = [] ports_list = cfg['container']['ports'].strip().split() dns_list = cfg['host']['dns'].strip().split() for portmap in ports_list: port_args.append('-p=' + portmap) for dns in dns_list: dns_args.append('-dns=' + dns) docker_command = docker + [ arg for arg in [ 'run', cidfile_arg, privileged_arg, lxc_conf_arg, name_arg, hostname_arg, volume_arg, rm_arg, ] + port_args + dns_args + [ '-t', '-i', cfg['container']['docker_image'], '/bin/bash' ] if arg != '' ] if cfg['build']['interactive'] >= 2: print('\n\nAbout to start container. 
' + 'Ports mapped will be: ' + ', '.join(port_args) + ' (from\n\n[host]\nports:<value>\n\nconfig, building on the ' + 'configurable base image passed in in:\n\n\t--image <image>\n' + '\nor config:\n\n\t[container]\n\tdocker_image:<image>)\n\nBase ' + 'image in this case is:\n\n\t' + cfg['container']['docker_image'] + '\n\n' + util.colour('31','[Hit return to continue]')) raw_input('') shutit.log('\n\nCommand being run is:\n\n' + ' '.join(docker_command),force_stdout=True,prefix=False) shutit.log('\n\nThis may download the image, please be patient\n\n',force_stdout=True,prefix=False) container_child = pexpect.spawn(docker_command[0], docker_command[1:]) expect = ['assword',cfg['expect_prompts']['base_prompt'].strip(),'Waiting','ulling','endpoint','Download'] res = container_child.expect(expect,9999) while True: shutit.log(""">>>\n""" + container_child.before + container_child.after + """\n<<<""") if res == 0: shutit.log('...') res = shutit.send(cfg['host']['password'],child=container_child,expect=expect,timeout=9999,check_exit=False,fail_on_empty_before=False) elif res == 1: shutit.log('Prompt found, breaking out') break else: res = container_child.expect(expect,9999) continue # Get the cid time.sleep(5) # cidfile creation is sometimes slow... shutit.log('Slept') cid = open(cfg['build']['cidfile']).read() shutit.log('Opening file') if cid == '' or re.match('^[a-z0-9]+$', cid) == None: shutit.fail('Could not get container_id - quitting. Check whether ' + 'other containers may be clashing on port allocation or name.' 
+ '\nYou might want to try running: sudo docker kill ' + cfg['container']['name'] + '; sudo docker rm ' + cfg['container']['name'] + '\nto resolve a name clash or: ' + cfg['host']['docker_executable'] + ' ps -a | grep ' + cfg['container']['ports'] + ' | awk \'{print $1}\' | ' + 'xargs ' + cfg['host']['docker_executable'] + ' kill\nto + ' 'resolve a port clash\n') shutit.log('cid: ' + cid) cfg['container']['container_id'] = cid # Now let's have a host_child shutit.log('Creating host child') shutit.log('Spawning host child') host_child = pexpect.spawn('/bin/bash') shutit.log('Spawning done') # Some pexpect settings shutit.pexpect_children['host_child'] = host_child shutit.pexpect_children['container_child'] = container_child shutit.log('Setting default expect') shutit.set_default_expect(cfg['expect_prompts']['base_prompt']) shutit.log('Setting default expect done') host_child.logfile_send = container_child.logfile_send = sys.stdout host_child.logfile_read = container_child.logfile_read = sys.stdout host_child.maxread = container_child.maxread = 2000 host_child.searchwindowsize = container_child.searchwindowsize = 1024 delay = cfg['build']['command_pause'] host_child.delaybeforesend = container_child.delaybeforesend = delay # Set up prompts and let the user do things before the build # host child shutit.log('Setting default child') shutit.set_default_child(host_child) shutit.log('Setting default child done') shutit.log('Setting up default prompt on host child') shutit.log('Setting up prompt') shutit.setup_prompt('real_user_prompt',prefix='REAL_USER') shutit.log('Setting up prompt done') # container child shutit.set_default_child(container_child) shutit.log('Setting up default prompt on container child') shutit.setup_prompt('pre_build', prefix='PRE_BUILD') shutit.send('export HOME=/root') shutit.get_distro_info() shutit.setup_prompt('root_prompt', prefix='ROOT') # Create the build directory and put the config in it. 
shutit.send('mkdir -p ' + shutit.cfg ['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id']) # Record the command we ran and the python env. # TODO: record the image id we ran against - wait for "docker debug" command shutit.send_file(shutit.cfg['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id'] + '/python_env.sh',str(sys.__dict__),log=False) shutit.send_file(shutit.cfg['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id'] + '/docker_command.sh',' '.join(docker_command),log=False) shutit.pause_point('Anything you want to do now the container is connected to?', level=2) return True def finalize(self,shutit): """Finalizes the container, exiting for us back to the original shell and performing any repository work required. """ # Put build info into the container shutit.send('mkdir -p ' + shutit.cfg ['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id']) shutit.send_file(shutit.cfg['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id'] + '/build.log', util.get_commands(shutit)) shutit.send_file(shutit.cfg['build']['build_db_dir'] + '/' + shutit.cfg['build']['build_id'] + '/build_commands.sh',util.get_commands(shutit)) shutit.add_line_to_file(shutit.cfg['build']['build_id'],shutit.cfg ['build']['build_db_dir'] + '/builds') # Finish with the container shutit.pexpect_children['container_child'].sendline('exit') # Exit container host_child = shutit.pexpect_children['host_child'] shutit.set_default_child(host_child) shutit.set_default_expect(shutit.cfg['expect_prompts']['real_user_prompt']) # Tag and push etc shutit.pause_point('\nDoing final committing/tagging on the overall container and creating the artifact.', child=shutit.pexpect_children['host_child'],print_input=False, level=3) shutit.do_repository_work(shutit.cfg['repository']['name'],docker_executable=shutit.cfg['host']['docker_executable'],password=shutit.cfg['host']['password']) # Final exits host_child.sendline('exit') # Exit raw bash return True def 
conn_module(): return conn_docker( 'shutit.tk.conn_docker', -0.1, description='Connect ShutIt to docker' ) class setup(ShutItModule): def is_installed(self,shutit): """Always considered false for ShutIt setup. """ return False def build(self,shutit): """Initializes container ready for build, setting password and updating package management. """ do_update = True # Seems to be broken #do_update = shutit.cfg[self.module_id]['do_update'] if shutit.cfg['container']['install_type'] == 'apt': shutit.send('export DEBIAN_FRONTEND=noninteractive') if do_update: shutit.send('apt-get update',timeout=9999,check_exit=False) shutit.send('dpkg-divert --local --rename --add /sbin/initctl') shutit.send('ln -f -s /bin/true /sbin/initctl') elif shutit.cfg['container']['install_type'] == 'yum': if do_update: shutit.send('yum update -y',timeout=9999) shutit.set_password(shutit.cfg['container']['password']) shutit.pause_point('Anything you want to do to the container before the build starts?', level=2) return True def remove(self,shutit): """Removes anything performed as part of build. """ cfg = shutit.cfg if cfg['container']['install_type'] == 'yum': shutit.remove('passwd') return True def get_config(self, shutit): """Gets the configured core pacakges, and whether to perform the
<filename>variableProcessing/BFSVM_class/bfsvmClass.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Apr 18 18:20:34 2020 @author: ishidaira """ from cvxopt import matrix import numpy as np from numpy import linalg import cvxopt from sklearn import preprocessing from sklearn.svm import SVC from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import StratifiedKFold from imblearn.over_sampling import SMOTE import pandas as pd from scipy.stats import norm import seaborn as sns import matplotlib.pyplot as plt from BFSVM_class import precision from imblearn.over_sampling import SVMSMOTE from BFSVM_class.fsvmClass import HYP_SVM from os import mkdir from BFSVM_class.LS_FSVM import * # three kernel functions def linear_kernel(x1, x2): return np.dot(x1, x2) # param p def polynomial_kernel(x, y, p=1.5): return (1 + np.dot(x, y)) ** p # param sigmma def gaussian_kernel(x, y, sigma=1.0): # print(-linalg.norm(x-y)**2) x = np.asarray(x) y = np.asarray(y) return np.exp((-linalg.norm(x - y) ** 2) / (sigma ** 2)) # lowsampling def lowSampling(df, percent=3 / 3): data1 = df[df[0] == 1] # 将多数 data0 = df[df[0] == 0] # 将少数类别的样本放在data0 index = np.random.randint( len(data1), size=int(percent * (len(df) - len(data1))) ) # 随机给定下采样取出样本的序号 lower_data1 = data1.iloc[list(index)] # 下采样 return pd.concat([lower_data1, data0]) # upsampling def upSampling(X_train, y_train): X_train, y_train = SMOTE(kind="svm").fit_sample(X_train, y_train) return X_train, y_train def grid_search(X, y, kernel="gaussian"): rs = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0) precisionArray = [] if kernel == "gaussian": index = 0 for C in [1, 10, 100, 1000]: for sigma in [0.6, 0.7, 0.8]: pre = np.zeros(6) for test, train in rs.split(X, y): clf = BFSVM(kernel="gaussian", C=C, sigma=sigma) clf.mvalue(X[train], y[train]) clf.fit(X[train], y[train]) 
y_predict = clf.predict(X[test]) y_test = np.array(y[test]) for i in range(len(y_test)): if y_test[i] == 0: y_test[i] = -1 pre += precision(y_predict, y_test) precisionArray.append((C, sigma, pre / 3)) index += 1 return precisionArray class BFSVM(object): # initial function def __init__( self, kernel=None, fuzzyvalue="Logistic", databalance="origine", a=4, b=3, C=None, P=None, sigma=None, ): """ init function """ self.kernel = kernel self.C = C self.P = P self.sigma = sigma self.fuzzyvalue = fuzzyvalue self.a = a self.b = b self.databalance = databalance if self.C is not None: self.C = float(self.C) def mvalue(self, X_train, y_train): """ calculate the membership value :param X_train: X train sample :param y_train: y train sample """ # print('fuzzy value:', self.fuzzyvalue ) # ######## Methode 1 : FSVM ######## # clf = HYP_SVM(kernel='polynomial', C=1.5, P=1.5) # clf.m_func(X_train,y_train) # clf.fit(X_train, y_train) # score = clf.project(X_train)-clf.b # ######## Methode 2:LSFSVM ######## kernel_dict = {"type": "RBF", "sigma": 0.717} fuzzyvalue = {"type": "Cen", "function": "Lin"} clf = LSFSVM(10, kernel_dict, fuzzyvalue, "o", 3 / 4) m = clf._mvalue(X_train, y_train) self.abc = m clf.fit(X_train, y_train) clf.predict(X_train) score = clf.y_predict - clf.b # ######## Methode 3:SVM ######## # clf = SVC(gamma='scale') # clf.fit(X_train,y_train) # score = clf.decision_function(X_train) # print(score) if self.fuzzyvalue == "Lin": m_value = (score - max(score)) / (max(score) - min(score)) elif self.fuzzyvalue == "Bridge": s_up = np.percentile(score, 55) s_down = np.percentile(score, 45) m_value = np.zeros((len(score))) for i in range(len(score)): if score[i] > s_up: m_value[i] = 1 elif score[i] <= s_down: m_value[i] = 0 else: m_value[i] = (score[i] - s_down) / (s_up - s_down) elif self.fuzzyvalue == "Logistic": # a = self.a # b = self.b scoreorg = score a = 1 N_plus = len(y_train[y_train == 1]) sorted(score, reverse=True) b = np.mean(score[N_plus - 1] + score[N_plus]) 
m_value = [ 1 / (np.exp(-a * scoreorg[i] - b) + 1) for i in range(len(score)) ] self.m_value = np.array(m_value) # y_str = [] # for i,y in enumerate(y_train): # if y==1: # y_str.append("positive") # else: # y_str.append("negative") # m_value = pd.DataFrame(dict(membership=self.m_value,y=y_str)) elif self.fuzzyvalue == "Probit": mu = self.mu sigma = self.sigma self.m_value = norm.cdf((score - mu) / sigma) # return m_value def fit(self, X_train, y): """ use the train samples to fit classifier :param X_train: X train sample :param y: y train sample """ # extract the number of samples and attributes of train and test n_samples, n_features = X_train.shape # initialize a 2n*2n matrix of Kernel function K(xi,xj) self.K = np.zeros((2 * n_samples, 2 * n_samples)) for i in range(n_samples): for j in range(n_samples): if self.kernel == "polynomial": self.K[i, j] = polynomial_kernel(X_train[i], X_train[j], self.P) elif self.kernel == "gaussian": self.K[i, j] = gaussian_kernel(X_train[i], X_train[j], self.sigma) else: self.K[i, j] = linear_kernel(X_train[i], X_train[j]) # print(K[i,j]) X_train = np.asarray(X_train) # P = K(xi,xj) P = cvxopt.matrix(self.K) # q = [-1,...,-1,-2,...,-2] q = np.concatenate((np.ones(n_samples) * -1, np.ones(n_samples) * -2)) q = cvxopt.matrix(q) # equality constraints # A = [1,...,1,0,...,0] A = np.concatenate((np.ones(n_samples) * 1, np.zeros(n_samples))) A = cvxopt.matrix(A) A = matrix(A, (1, 2 * n_samples), "d") # changes done # b = [0.0] b = cvxopt.matrix(0.0) # inequality constraints if self.C is None: # tmp1 = -1 as diagonal, n*n tmp1 = np.diag(np.ones(n_samples) * -1) # tmp1 = 2*tmp1 n*2n tmp1 = np.hstack((tmp1, tmp1)) # tmp2 = n*2n, the second matrix n*n diagonal as -1 tmp2 = np.diag(np.ones(n_samples) * -1) tmp2 = np.hstack((np.diag(np.zeros(n_samples)), tmp2)) G = cvxopt.matrix(np.vstack((tmp1, tmp2))) G = matrix(G, (6 * n_samples, 2 * n_samples), "d") # h = [0,0,0,...,0] 2n*1 h = cvxopt.matrix(np.zeros(2 * n_samples)) else: # tmp1 = -1 as 
diagonal, n*n tmp1 = np.diag(np.ones(n_samples) * -1) # tmp1 = 2*tmp1 n*2n tmp1 = np.hstack((tmp1, tmp1)) # tmp2 = n*2n, the second matrix n*n diagonal as -1 tmp2 = np.diag(np.ones(n_samples) * -1) tmp2 = np.hstack((np.diag(np.zeros(n_samples)), tmp2)) tmp3 = np.identity(n_samples) # tmp3 = 2*tmp3 n*2n tmp3 = np.hstack((tmp3, tmp3)) # tmp4 = n*2n, the second matrix n*n diagonal as 1 tmp4 = np.identity(n_samples) tmp4 = np.hstack((np.diag(np.zeros(n_samples)), tmp4)) # G = tmp1,tmp2,tmp3,tmp4 shape 4n*2n G = cvxopt.matrix(np.vstack((tmp1, tmp2, tmp3, tmp4))) # G = matrix(G, (6*n_samples,2*n_samples), 'd') # h = 4n*1 tmp1 = np.zeros(2 * n_samples) tmp2 = np.ones(n_samples) * self.C * self.m_value tmp3 = np.ones(n_samples) * self.C * (1 - self.m_value) h = cvxopt.matrix(np.hstack((tmp1, tmp2, tmp3))) # solve QP problem solution = cvxopt.solvers.qp(P, q, G, h, A, b) # print(solution['status']) # Lagrange multipliers # a = [epsilon1,...,epsilonN,beta1,...,betaN] a = np.ravel(solution["x"]) epsilon = a[:n_samples] beta = a[n_samples:] # Support vectors have non zero lagrange multipliers sv = np.array(list(epsilon + beta > 1e-5) and list(beta > 1e-5)) # alpha<cm so zeta = 0 sv_alpha = np.array(list(abs(epsilon + beta - self.C * self.m_value) > 1e-8)) # beta<c(1-m) so mu = 0 sv_beta = np.array(list(abs(beta - self.C * (1 - self.m_value)) > 1e-8)) # print(sv_beta) ind = np.arange(len(epsilon))[sv] ind_alpha = np.arange(len(epsilon))[sv_alpha] ind_beta = np.arange(len(epsilon))[sv_beta] self.epsilon_org = epsilon self.epsilon = epsilon[sv] epsilon_alpha = epsilon[sv_alpha] epsilon_beta = epsilon[sv_beta] self.beta_org = beta self.beta = beta[sv] self.sv = X_train[sv] self.sv_y = y[sv] self.sv_yorg = y X_train = np.asarray(X_train) self.K = self.K[:n_samples, :n_samples] # Calculate b # ####methode 1###### # self.b = 0 # for n in range(len(self.epsilon)): # self.b -= np.sum(self.epsilon * self.K[ind[n], sv]) # self.b /= len(self.epsilon) # ####methode 2###### # b= 0 # if 
len(epsilon_alpha): # for n in range(len(epsilon_alpha)): # b += 1 # b -= np.sum(epsilon_alpha * self.K[ind_alpha[n], sv_alpha]) # # if len(epsilon_beta): # for n in range(len(epsilon_beta)): # b -= 1 # b -= np.sum(epsilon_beta * self.K[ind_beta[n], sv_beta]) # # self.b = b/(len(epsilon_alpha)+len(epsilon_beta)) # print('a',self.b) # # ####methode 3####### b_alpha, b_beta = 0, 0 if len(epsilon_alpha): for n in range(len(epsilon_alpha)): b_alpha = np.max(1 - epsilon_alpha * self.K[ind_alpha[n], sv_alpha]) if len(epsilon_beta): for n in range(len(epsilon_beta)): b_beta = np.min(-1 - epsilon_beta * self.K[ind_beta[n], sv_beta]) self.b = -(b_alpha + b_beta) / 2 # ####methode 4####### # b_alpha = 0 ## print('a',epsilon_alpha) ## print('b',epsilon_beta) # if len(epsilon_alpha): # for n in range(len(epsilon_alpha)): # b_alpha += 1 # b_alpha -= np.sum(epsilon_alpha * self.K[ind_alpha[n], sv_alpha]) # b_alpha /= len(epsilon_alpha) # # b_beta = 0 # if len(epsilon_beta): # for n in range(len(epsilon_beta)): # b_beta -= 1 # b_beta -= np.sum(epsilon_beta * self.K[ind_beta[n], sv_beta]) # b_beta /= len(epsilon_beta) # if b_alpha and b_beta: # self.b = (b_alpha+b_beta)/2 # else: # if b_alpha: # self.b = b_alpha # else: # self.b = b_beta # print(self.b) # Weight vector ######methode 5####### # self.b = 0 # for n in range(len(epsilon_alpha)): # self.b += y[sv_alpha] # self.b -= np.sum(epsilon_alpha * self.K[ind[n], sv_alpha]) # for n in range(len(epsilon_beta)): # self.b += y[sv_beta] # self.b -= np.sum(epsilon_beta * self.K[ind[n], sv_beta]) # self.b /= (len(epsilon_alpha)+len(epsilon_beta)) if self.kernel ==
"""Dashborad views.""" from django.shortcuts import get_object_or_404, render, redirect from django.http import HttpResponse, HttpResponseRedirect from django.urls import reverse from django.views import generic from wallet.models import Transaction from scheduler.models import BookingRecord, Session from account.models import User, Student, Tutor from django.contrib.auth.decorators import login_required from datetime import datetime, timedelta, date, time from django.utils import timezone from django.core.mail import send_mail # def MybookingsView(request): # #model = scheduler # record = student.BookingRecord_set.all # #template_name = 'my_bookings.html' # #context_object_name = 'mybookings' # return render(request, 'my_bookings.html', {'record': record}) class MytransactionsView(generic.ListView): model = Transaction template_name = 'my_transactions.html' context_object_name = 'my_transaction_records' def get_context_data(self, **kwargs): context = super(MytransactionsView, self).get_context_data(**kwargs) #print datetime.now() if self.request.user.is_authenticated == False: context['records'] = None context['not_logged_in'] = 'true' return context if self.request.session['username'] is None: context['records'] = None return context else: context['username'] = self.request.session['username'] usrn = self.request.session['username'] user = User.objects.get(username=usrn) try: usr = Student.objects.get(user=user) context['user_type'] = 'Student' except Student.DoesNotExist: usr = Tutor.objects.get(user=user) context['user_type'] = 'Tutor' #usr = get_object_or_404(Student, user=user) records = usr.bookingrecord_set.all() context['records'] = [] for r in records: one_month_before_now = timezone.now() - timedelta(days=30) if r.entry_date > one_month_before_now: context['records'].append(r) transactions = [] for rec in context['records']: transactions.append(rec.transaction) context['transactions'] = transactions context['zipped'] = zip(records,transactions) 
context['balance'] = usr.wallet_balance return context class MybookingsView(generic.ListView): model = BookingRecord template_name = 'my_bookings.html' context_object_name = 'my_booking_records' def get_context_data(self, **kwargs): context = super(MybookingsView, self).get_context_data(**kwargs) if self.request.user.is_authenticated == False: context['records'] = None context['not_logged_in'] = 'true' return context if self.request.session['username'] is None: context['records'] = None return context else: usrn = self.request.session['username'] user = User.objects.get(username=usrn) try: stu = Student.objects.get(user=user) context['is_student'] = 'true' except Student.DoesNotExist: context['is_student'] = 'false' try: tut = Tutor.objects.get(user=user) context['is_tutor'] = 'true' except Tutor.DoesNotExist: context['is_tutor'] = 'false' if 'id' in self.request.GET: context['id'] = 'selected' context['record'] =BookingRecord.objects.filter(id=self.request.GET['id']).first() if context['is_student'] == 'true': if context['record'].student == stu: context['selected_type'] = 'as_stu' if context['is_tutor'] == 'true': if context['record'].tutor == tut: context['selected_type'] = 'as_tut' return context else: context['id'] = 'not_selected' if context['is_tutor'] == 'true': context['records_as_tut'] = tut.bookingrecord_set.all() if context['is_student'] == 'true': context['records_as_stu'] = stu.bookingrecord_set.all() return context def post(self, request, **kwargs): print(request) bkRecord_id = self.request.POST.get('booking_id', '') bkrc = BookingRecord.objects.filter(id=bkRecord_id).first() sess = Session.objects.get(bookingrecord=bkrc) one_day_from_now = timezone.now() + timedelta(hours=24) if one_day_from_now < sess.start_time: sess.status = Session.BOOKABLE sess.save() # save is needed for functioning - Jiayao refund = bkrc.transaction.amount + bkrc.transaction.commission usrn = self.request.session['username'] user = User.objects.get(username=usrn) usr = 
get_object_or_404(Student, user=user) usr.wallet_balance += refund usr.save() bkrc.status = BookingRecord.CANCELED bkrc.save() tut = bkrc.tutor send_mail('Session Canceled', 'Please check on Tutoria, your session with '+ bkrc.tutor.first_name + ' ' + bkrc.tutor.last_name + ' from ' + str(sess.start_time) + ' to ' + str(sess.end_time) + ' has been canceled.', '<EMAIL>', [usr.email], False) send_mail('Session Canceled', 'Please check on Tutoria, your session with '+ bkrc.student.first_name + ' ' + bkrc.student.last_name + ' from ' + str(sess.start_time) + ' to ' + str(sess.end_time) + ' has been canceled.', '<EMAIL>', [tut.email], False) return redirect('dashboard/mybookings/') else: return HttpResponse("This session is within 24 hours and can't be canceled!") class MytimetableView(generic.ListView): model = BookingRecord template_name = 'my_timetable.html' context_object_name = 'my_timetable' def get_context_data(self, **kwargs): """Get context data.""" context = super(MytimetableView, self).get_context_data(**kwargs) isStudent = False isTutor = False if self.request.user.is_authenticated == False: context['timetable'] = None return context if self.request.session['username'] is None: context['timetable'] = None return context else: usrn = self.request.session['username'] user = User.objects.get(username=usrn) try: usr = Tutor.objects.get(user=user) isTutor = True except Tutor.DoesNotExist: pass context['is_tutor'] = isTutor if isTutor: context['tutor'] = usr # generate a 1D array which stores the timetable # there are 14 days # private tutor has 24 timeslots per day while contracted tutor has 48 is_contracted_tutor = usr.tutor_type == 'CT' slots_per_day = 48 if is_contracted_tutor else 24 days_to_display = 14 timetable = [] # retrieve date of today today = date.today() now = datetime.now() if is_contracted_tutor: now_index = now.hour * 2 + now.minute // 30 else: now_index = now.hour for i in range(days_to_display * slots_per_day): # add all timeslots that are not 
in database as CLOSED session # TODO this part might be dirty, we should create all sessions in advance d = today + timedelta(days = i // slots_per_day) if is_contracted_tutor: hour = (i % slots_per_day) // 2 minute = 0 if i % 2 == 0 else 30 else: hour = i % slots_per_day minute = 0 start_time = datetime.combine(d, time(hour, minute)) if is_contracted_tutor: end_time = start_time + timedelta(minutes = 30) else: end_time = start_time + timedelta(hours = 1) session, _ = Session.objects.get_or_create( start_time=timezone.make_aware(start_time), end_time=timezone.make_aware(end_time), tutor=usr) elem = {'status' : session.status, 'date' : str(today + timedelta(days=i // slots_per_day)), 'id': session.id} #print("elem = ", elem) timetable.append(elem) # closed # print("tot: " + str(days_to_display * slots_per_day)) # convert "date" of today to "datetime" of today's 0 'o clock # init_time = datetime.combine(today, datetime.min.time()) for session in usr.session_set.all(): start_time = session.start_time hour_diff = start_time.hour - 0 # if timetable starts from 0 hour_diff += 8 # timezone issue (todo) #print(start_time, " hour ", start_time.hour) minute_diff = start_time.minute date_diff = (start_time.date() - today).days # filter date within days_to_display if 0 <= date_diff < days_to_display: index = date_diff * slots_per_day if is_contracted_tutor: index += hour_diff * 2 + minute_diff // 30 else: index += hour_diff # print("date_diff = ", date_diff, "hour_diff = ", hour_diff, # "minute_diff = ", minute_diff, "index = ", index) #print(index) timetable[index]['status'] = str(session.status) timetable[index]['id'] = session.id #print("index = ", index, " session id = ", session.id) if session.status == session.BOOKED: # logic is a bit tricky here # note we won't pass session id but booking_record id here # because one session can have multiple booking records # tutor wants to see the latest record when clicking the slot records = session.bookingrecord_set.all() for 
record in records: if record.status == record.INCOMING: timetable[index]['id'] = record.id break for i in range(days_to_display * slots_per_day): if i <= now_index: timetable[i]['status'] = "PASSED" context['tutor_timetable'] = timetable #print(timetable) try: usr = Student.objects.get(user=user) isStudent = True except Student.DoesNotExist: pass context['is_student'] = isStudent if isStudent: context['student'] = usr # generate a 1D array which stores the timetable # there are 14 days slots_per_day = 48 days_to_display = 7 timetable = [] # retrieve date of today today = date.today() now = datetime.now() now_index = now.hour * 2 + now.minute // 30 for i in range(days_to_display * slots_per_day): elem = {'status' : 'X', 'date' : str(today + timedelta(days=i // slots_per_day)), 'id': ''} timetable.append(elem) # closed for record in usr.bookingrecord_set.all(): start_time = record.session.start_time hour_diff = start_time.hour - 0 # if timetable starts from 0 hour_diff += 8 # timezone issue (todo) #print(start_time, " hour ", start_time.hour) minute_diff = start_time.minute date_diff = (start_time.date() - today).days # filter date within days_to_display if 0 <= date_diff < days_to_display: index = date_diff * slots_per_day index += hour_diff * 2 + minute_diff // 30 # print("date_diff = ", date_diff, "hour_diff = ", hour_diff, # "minute_diff = ", minute_diff, "index = ", index) #print(index) if record.status == record.INCOMING or record.status == record.ONGOING: # TODO what about other states? 
timetable[index]['status'] = 'A' # use 'A' to represent this record has detail to be referred timetable[index]['id'] = record.id if record.tutor.tutor_type == record.tutor.PRIVATE_TUTOR: timetable[index + 1]['status'] = 'A' timetable[index + 1]['id'] = record.id for i in range(days_to_display * slots_per_day): if i <= now_index: timetable[i]['status'] = "PASSED" context['student_timetable'] = timetable #print(timetable) return context def post(self, request, **kwargs): session_id = self.request.POST.get('session_id', '') session = Session.objects.get(id=session_id) start_time = session.start_time now = timezone.make_aware(datetime.now()) if (now + timedelta(days = 1) > start_time): return HttpResponse("Session within 24 hours before start time is locked!") #print("before update, session = ", session, " status = ", session.status) if session.status == session.CLOSED: session.status = session.BOOKABLE elif session.status == session.BOOKABLE: session.status = session.CLOSED session.save() #print("after update, session = ", session, " status = ", session.status) return redirect('/dashboard/mytimetable/') class MyWalletView(generic.TemplateView): template_name = 'my_wallet.html' def get_context_data(self, **kwargs): context = super(MyWalletView, self).get_context_data(**kwargs) if self.request.user.is_authenticated == False: context['not_logged_in'] = 'true' return context if self.request.session['username'] is None: return context else: context['status'] = 1 usrn = self.request.session['username'] user = User.objects.get(username=usrn) context['balance'] = user.wallet_balance return context def post(self, req, *args, **kwargs): usrn = self.request.session['username'] user = User.objects.get(username=usrn) balance = user.wallet_balance op = req.POST['operation'] amount = float(req.POST['amount']) print(op) if op == 'topup': user.wallet_balance
    # NOTE(review): fragment of a filesystem test-case class body (pyfilesystem
    # style); the class header and the start of the first method lie outside
    # this chunk. The next three statements are the tail of a repr test.
    # repr should not break
    repr(self.fs)
    self.assertIsInstance(six.text_type(self.fs), six.text_type)

    def test_getmeta(self):
        # Get the meta dict
        meta = self.fs.getmeta()
        # Check default namespace
        self.assertEqual(meta, self.fs.getmeta(namespace="standard"))
        # Must be a dict
        self.assertTrue(isinstance(meta, dict))
        # Unknown namespaces yield an empty dict, not an error
        no_meta = self.fs.getmeta("__nosuchnamespace__")
        self.assertIsInstance(no_meta, dict)
        self.assertFalse(no_meta)

    def test_isfile(self):
        self.assertFalse(self.fs.isfile("foo.txt"))
        self.fs.create("foo.txt")
        self.assertTrue(self.fs.isfile("foo.txt"))
        self.fs.makedir("bar")
        # A directory is not a file
        self.assertFalse(self.fs.isfile("bar"))

    def test_isdir(self):
        self.assertFalse(self.fs.isdir("foo"))
        self.fs.create("bar")
        self.fs.makedir("foo")
        self.assertTrue(self.fs.isdir("foo"))
        # A file is not a directory
        self.assertFalse(self.fs.isdir("bar"))

    def test_islink(self):
        self.fs.touch("foo")
        self.assertFalse(self.fs.islink("foo"))
        # Missing resources raise rather than returning False
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.islink("bar")

    def test_getsize(self):
        self.fs.writebytes("empty", b"")
        self.fs.writebytes("one", b"a")
        self.fs.writebytes("onethousand", ("b" * 1000).encode("ascii"))
        self.assertEqual(self.fs.getsize("empty"), 0)
        self.assertEqual(self.fs.getsize("one"), 1)
        self.assertEqual(self.fs.getsize("onethousand"), 1000)
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.getsize("doesnotexist")

    def test_getsyspath(self):
        self.fs.create("foo")
        # Not every filesystem has a system path; NoSysPath is acceptable.
        try:
            syspath = self.fs.getsyspath("foo")
        except errors.NoSysPath:
            self.assertFalse(self.fs.hassyspath("foo"))
        else:
            self.assertIsInstance(syspath, text_type)
            self.assertIsInstance(self.fs.getospath("foo"), bytes)
            self.assertTrue(self.fs.hassyspath("foo"))
        # Should not throw an error
        self.fs.hassyspath("a/b/c/foo/bar")

    def test_geturl(self):
        self.fs.create("foo")
        # Not every filesystem has URLs; NoURL is acceptable.
        try:
            self.fs.geturl("foo")
        except errors.NoURL:
            self.assertFalse(self.fs.hasurl("foo"))
        else:
            self.assertTrue(self.fs.hasurl("foo"))
        # Should not throw an error
        self.fs.hasurl("a/b/c/foo/bar")

    def test_geturl_purpose(self):
        """Check an unknown purpose raises a NoURL error.
        """
        self.fs.create("foo")
        with self.assertRaises(errors.NoURL):
            self.fs.geturl("foo", purpose="__nosuchpurpose__")

    def test_validatepath(self):
        """Check validatepath returns an absolute path.
        """
        path = self.fs.validatepath("foo")
        self.assertEqual(path, "/foo")

    def test_invalid_chars(self):
        # Test invalid path method: NUL bytes are rejected both when opening
        # and when validating.
        with self.assertRaises(errors.InvalidCharsInPath):
            self.fs.open("invalid\0file", "wb")
        with self.assertRaises(errors.InvalidCharsInPath):
            self.fs.validatepath("invalid\0file")

    def test_getinfo(self):
        # Test special case of root directory
        # Root directory has a name of ''
        root_info = self.fs.getinfo("/")
        self.assertEqual(root_info.name, "")
        self.assertTrue(root_info.is_dir)
        # Make a file of known size
        self.fs.writebytes("foo", b"bar")
        self.fs.makedir("dir")
        # Check basic namespace
        info = self.fs.getinfo("foo").raw
        self.assertIsInstance(info["basic"]["name"], text_type)
        self.assertEqual(info["basic"]["name"], "foo")
        self.assertFalse(info["basic"]["is_dir"])
        # Check basic namespace dir
        info = self.fs.getinfo("dir").raw
        self.assertEqual(info["basic"]["name"], "dir")
        self.assertTrue(info["basic"]["is_dir"])
        # Get the info
        info = self.fs.getinfo("foo", namespaces=["details"]).raw
        self.assertIsInstance(info, dict)
        self.assertEqual(info["details"]["size"], 3)
        self.assertEqual(info["details"]["type"], int(ResourceType.file))
        # Test getdetails
        self.assertEqual(info, self.fs.getdetails("foo").raw)
        # Raw info should be serializable
        try:
            json.dumps(info)
        except (TypeError, ValueError):
            raise AssertionError("info should be JSON serializable")
        # Non existant namespace is not an error
        no_info = self.fs.getinfo("foo", "__nosuchnamespace__").raw
        self.assertIsInstance(no_info, dict)
        self.assertEqual(no_info["basic"], {"name": "foo", "is_dir": False})
        # Check a number of standard namespaces
        # FS objects may not support all these, but we can at least
        # invoke the code
        info = self.fs.getinfo("foo", namespaces=["access", "stat", "details"])
        # Check that if the details namespace is present, times are
        # of valid types.
        if "details" in info.namespaces:
            details = info.raw["details"]
            self.assertIsInstance(details.get("accessed"), (type(None), int, float))
            self.assertIsInstance(details.get("modified"), (type(None), int, float))
            self.assertIsInstance(details.get("created"), (type(None), int, float))
            self.assertIsInstance(
                details.get("metadata_changed"), (type(None), int, float)
            )

    def test_exists(self):
        # Test exists method.
        # Check root directory always exists
        self.assertTrue(self.fs.exists("/"))
        self.assertTrue(self.fs.exists(""))
        # Check files don't exist
        self.assertFalse(self.fs.exists("foo"))
        self.assertFalse(self.fs.exists("foo/bar"))
        self.assertFalse(self.fs.exists("foo/bar/baz"))
        self.assertFalse(self.fs.exists("egg"))
        # make some files and directories
        self.fs.makedirs("foo/bar")
        self.fs.writebytes("foo/bar/baz", b"test")
        # Check files exists
        self.assertTrue(self.fs.exists("foo"))
        self.assertTrue(self.fs.exists("foo/bar"))
        self.assertTrue(self.fs.exists("foo/bar/baz"))
        self.assertFalse(self.fs.exists("egg"))
        self.assert_exists("foo")
        self.assert_exists("foo/bar")
        self.assert_exists("foo/bar/baz")
        self.assert_not_exists("egg")
        # Delete a file
        self.fs.remove("foo/bar/baz")
        # Check it no longer exists
        self.assert_not_exists("foo/bar/baz")
        self.assertFalse(self.fs.exists("foo/bar/baz"))
        self.assert_not_exists("foo/bar/baz")
        # Check root directory always exists
        self.assertTrue(self.fs.exists("/"))
        self.assertTrue(self.fs.exists(""))

    def test_listdir(self):
        # Check listing directory that doesn't exist
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.listdir("foobar")
        # Check aliases for root
        self.assertEqual(self.fs.listdir("/"), [])
        self.assertEqual(self.fs.listdir("."), [])
        self.assertEqual(self.fs.listdir("./"), [])
        # Make a few objects
        self.fs.writebytes("foo", b"egg")
        self.fs.writebytes("bar", b"egg")
        self.fs.makedir("baz")
        # This should not be listed
        self.fs.writebytes("baz/egg", b"egg")
        # Check list works
        six.assertCountEqual(self, self.fs.listdir("/"), ["foo", "bar", "baz"])
        six.assertCountEqual(self, self.fs.listdir("."), ["foo", "bar", "baz"])
        six.assertCountEqual(self, self.fs.listdir("./"), ["foo", "bar", "baz"])
        # Check paths are unicode strings
        for name in self.fs.listdir("/"):
            self.assertIsInstance(name, text_type)
        # Create a subdirectory
        self.fs.makedir("dir")
        # Should start empty
        self.assertEqual(self.fs.listdir("/dir"), [])
        # Write some files
        self.fs.writebytes("dir/foofoo", b"egg")
        self.fs.writebytes("dir/barbar", b"egg")
        # Check listing subdirectory
        six.assertCountEqual(self, self.fs.listdir("dir"), ["foofoo", "barbar"])
        # Make sure they are unicode strings
        for name in self.fs.listdir("dir"):
            self.assertIsInstance(name, text_type)
        self.fs.create("notadir")
        with self.assertRaises(errors.DirectoryExpected):
            self.fs.listdir("notadir")

    def test_move(self):
        # Make a file
        self.fs.writebytes("foo", b"egg")
        self.assert_isfile("foo")
        # Move it
        self.fs.move("foo", "bar")
        # Check it has gone from original location
        self.assert_not_exists("foo")
        # Check it exists in the new location, and contents match
        self.assert_exists("bar")
        self.assert_bytes("bar", b"egg")
        # Check moving to existing file fails
        self.fs.writebytes("foo2", b"eggegg")
        with self.assertRaises(errors.DestinationExists):
            self.fs.move("foo2", "bar")
        # Check move with overwrite=True
        self.fs.move("foo2", "bar", overwrite=True)
        self.assert_not_exists("foo2")
        # Check moving to a non-existant directory
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.move("bar", "egg/bar")
        # Check moving an unexisting source
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.move("egg", "spam")
        # Check moving between different directories
        self.fs.makedir("baz")
        self.fs.writebytes("baz/bazbaz", b"bazbaz")
        self.fs.makedir("baz2")
        self.fs.move("baz/bazbaz", "baz2/bazbaz")
        self.assert_not_exists("baz/bazbaz")
        self.assert_bytes("baz2/bazbaz", b"bazbaz")
        # Check moving a directory raises an error
        self.assert_isdir("baz2")
        self.assert_not_exists("yolk")
        with self.assertRaises(errors.FileExpected):
            self.fs.move("baz2", "yolk")

    def test_makedir(self):
        # Check edge case of root
        with self.assertRaises(errors.DirectoryExists):
            self.fs.makedir("/")
        # Making root is a null op with recreate
        slash_fs = self.fs.makedir("/", recreate=True)
        self.assertIsInstance(slash_fs, SubFS)
        self.assertEqual(self.fs.listdir("/"), [])
        self.assert_not_exists("foo")
        self.fs.makedir("foo")
        self.assert_isdir("foo")
        self.assertEqual(self.fs.gettype("foo"), ResourceType.directory)
        self.fs.writebytes("foo/bar.txt", b"egg")
        self.assert_bytes("foo/bar.txt", b"egg")
        # Directory exists
        with self.assertRaises(errors.DirectoryExists):
            self.fs.makedir("foo")
        # Parent directory doesn't exist
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.makedir("/foo/bar/baz")
        self.fs.makedir("/foo/bar")
        self.fs.makedir("/foo/bar/baz")
        with self.assertRaises(errors.DirectoryExists):
            self.fs.makedir("foo/bar/baz")
        with self.assertRaises(errors.DirectoryExists):
            self.fs.makedir("foo/bar.txt")

    def test_makedirs(self):
        self.assertFalse(self.fs.exists("foo"))
        self.fs.makedirs("foo")
        self.assertEqual(self.fs.gettype("foo"), ResourceType.directory)
        self.fs.makedirs("foo/bar/baz")
        self.assertTrue(self.fs.isdir("foo/bar"))
        self.assertTrue(self.fs.isdir("foo/bar/baz"))
        with self.assertRaises(errors.DirectoryExists):
            self.fs.makedirs("foo/bar/baz")
        # recreate=True silences the DirectoryExists error
        self.fs.makedirs("foo/bar/baz", recreate=True)
        self.fs.writebytes("foo.bin", b"test")
        # A file in the path is a DirectoryExpected error
        with self.assertRaises(errors.DirectoryExpected):
            self.fs.makedirs("foo.bin/bar")
        with self.assertRaises(errors.DirectoryExpected):
            self.fs.makedirs("foo.bin/bar/baz/egg")

    def test_repeat_dir(self):
        # Catches bug with directories contain repeated names,
        # discovered in s3fs
        self.fs.makedirs("foo/foo/foo")
        self.assertEqual(self.fs.listdir(""), ["foo"])
        self.assertEqual(self.fs.listdir("foo"), ["foo"])
        self.assertEqual(self.fs.listdir("foo/foo"), ["foo"])
        self.assertEqual(self.fs.listdir("foo/foo/foo"), [])
        scan = list(self.fs.scandir("foo"))
        self.assertEqual(len(scan), 1)
        self.assertEqual(scan[0].name, "foo")

    def test_open(self):
        # Open a file that doesn't exist
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.open("doesnotexist", "r")
        self.fs.makedir("foo")
        # Create a new text file
        text = "Hello, World"
        with self.fs.open("foo/hello", "wt") as f:
            repr(f)
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.writable())
            self.assertFalse(f.readable())
            self.assertFalse(f.closed)
            f.write(text)
        self.assertTrue(f.closed)
        # Read it back
        with self.fs.open("foo/hello", "rt") as f:
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.readable())
            self.assertFalse(f.writable())
            self.assertFalse(f.closed)
            hello = f.read()
        self.assertTrue(f.closed)
        self.assertEqual(hello, text)
        self.assert_text("foo/hello", text)
        # Test overwrite
        text = "Goodbye, World"
        with self.fs.open("foo/hello", "wt") as f:
            f.write(text)
        self.assert_text("foo/hello", text)
        # Open from missing dir
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.open("/foo/bar/test.txt")
        # Test fileno returns a file number, if supported by the file.
        with self.fs.open("foo/hello") as f:
            try:
                fn = f.fileno()
            except io.UnsupportedOperation:
                pass
            else:
                self.assertEqual(os.read(fn, 7), b"Goodbye")
        # Test text files are proper iterators over themselves
        lines = os.linesep.join(["Line 1", "Line 2", "Line 3"])
        self.fs.writetext("iter.txt", lines)
        with self.fs.open("iter.txt") as f:
            for actual, expected in zip(f, lines.splitlines(1)):
                self.assertEqual(actual, expected)

    def test_openbin_rw(self):
        # Open a file that doesn't exist
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.openbin("doesnotexist", "r")
        self.fs.makedir("foo")
        # Create a new text file
        text = b"Hello, World\n"
        with self.fs.openbin("foo/hello", "w") as f:
            repr(f)
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.writable())
            self.assertFalse(f.readable())
            self.assertEqual(len(text), f.write(text))
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        # Exclusive mode fails on an existing file
        with self.assertRaises(errors.FileExists):
            with self.fs.openbin("foo/hello", "x") as f:
                pass
        # Read it back
        with self.fs.openbin("foo/hello", "r") as f:
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.readable())
            self.assertFalse(f.writable())
            hello = f.read()
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        self.assertEqual(hello, text)
        self.assert_bytes("foo/hello", text)
        # Test overwrite
        text = b"Goodbye, World"
        with self.fs.openbin("foo/hello", "w") as f:
            self.assertEqual(len(text), f.write(text))
        self.assert_bytes("foo/hello", text)
        # Test FileExpected raised
        with self.assertRaises(errors.FileExpected):
            self.fs.openbin("foo")  # directory
        # Open from missing dir
        with self.assertRaises(errors.ResourceNotFound):
            self.fs.openbin("/foo/bar/test.txt")
        # Test fileno returns a file number, if supported by the file.
        with self.fs.openbin("foo/hello") as f:
            try:
                fn = f.fileno()
            except io.UnsupportedOperation:
                pass
            else:
                self.assertEqual(os.read(fn, 7), b"Goodbye")
        # Test binary files are proper iterators over themselves
        lines = b"\n".join([b"Line 1", b"Line 2", b"Line 3"])
        self.fs.writebytes("iter.bin", lines)
        with self.fs.openbin("iter.bin") as f:
            for actual, expected in zip(f, lines.splitlines(1)):
                self.assertEqual(actual, expected)

    def test_open_files(self):
        # Test file-like objects work as expected.
        with self.fs.open("text", "w") as f:
            repr(f)
            text_type(f)
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.writable())
            self.assertFalse(f.readable())
            self.assertFalse(f.closed)
            self.assertEqual(f.tell(), 0)
            f.write("Hello\nWorld\n")
            self.assertEqual(f.tell(), 12)
            f.writelines(["foo\n", "bar\n", "baz\n"])
            with self.assertRaises(IOError):
                f.read(1)
        self.assertTrue(f.closed)
        with self.fs.open("bin", "wb") as f:
            with self.assertRaises(IOError):
                f.read(1)
        with self.fs.open("text", "r") as f:
            repr(f)
            text_type(f)
            self.assertIsInstance(f, io.IOBase)
            self.assertFalse(f.writable())
            self.assertTrue(f.readable())
            self.assertFalse(f.closed)
            self.assertEqual(
                f.readlines(), ["Hello\n", "World\n", "foo\n", "bar\n", "baz\n"]
            )
            with self.assertRaises(IOError):
                f.write("no")
        self.assertTrue(f.closed)
        with self.fs.open("text", "rb") as f:
            self.assertIsInstance(f, io.IOBase)
            self.assertFalse(f.writable())
            self.assertTrue(f.readable())
            self.assertFalse(f.closed)
            self.assertEqual(f.readlines(8), [b"Hello\n", b"World\n"])
            with self.assertRaises(IOError):
                f.write(b"no")
        self.assertTrue(f.closed)
        with self.fs.open("text", "r") as f:
            self.assertEqual(list(f), ["Hello\n", "World\n", "foo\n", "bar\n", "baz\n"])
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        iter_lines = iter(self.fs.open("text"))
        self.assertEqual(next(iter_lines), "Hello\n")
        with self.fs.open("unicode", "w") as f:
            self.assertEqual(12, f.write("Héllo\nWörld\n"))
        with self.fs.open("text", "rb") as f:
            self.assertIsInstance(f, io.IOBase)
            self.assertFalse(f.writable())
            self.assertTrue(f.readable())
            self.assertTrue(f.seekable())
            self.assertFalse(f.closed)
            self.assertEqual(f.read(1), b"H")
            self.assertEqual(3, f.seek(3, Seek.set))
            self.assertEqual(f.read(1), b"l")
            self.assertEqual(6, f.seek(2, Seek.current))
            self.assertEqual(f.read(1), b"W")
            self.assertEqual(22, f.seek(-2, Seek.end))
            self.assertEqual(f.read(1), b"z")
            with self.assertRaises(ValueError):
                f.seek(10, 77)
        self.assertTrue(f.closed)
        with self.fs.open("text", "r+b") as f:
            self.assertIsInstance(f, io.IOBase)
            self.assertTrue(f.readable())
            self.assertTrue(f.writable())
            self.assertTrue(f.seekable())
            self.assertFalse(f.closed)
            self.assertEqual(5, f.seek(5))
            self.assertEqual(5, f.truncate())
            self.assertEqual(0, f.seek(0))
            self.assertEqual(f.read(), b"Hello")
            self.assertEqual(10, f.truncate(10))
            self.assertEqual(5, f.tell())
            self.assertEqual(0, f.seek(0))
            print(repr(self.fs))
            print(repr(f))
            self.assertEqual(f.read(), b"Hello\0\0\0\0\0")
            self.assertEqual(4, f.seek(4))
            f.write(b"O")
            self.assertEqual(4, f.seek(4))
            self.assertEqual(f.read(1), b"O")
        self.assertTrue(f.closed)

    def test_openbin(self):
        # Write a binary file
        with self.fs.openbin("file.bin", "wb") as write_file:
            repr(write_file)
            text_type(write_file)
            self.assertIsInstance(write_file, io.IOBase)
            self.assertTrue(write_file.writable())
            self.assertFalse(write_file.readable())
            self.assertFalse(write_file.closed)
            self.assertEqual(3, write_file.write(b"\0\1\2"))
        self.assertTrue(write_file.closed)
        # Read a binary file
        with self.fs.openbin("file.bin", "rb") as read_file:
            repr(write_file)
            text_type(write_file)
            self.assertIsInstance(read_file, io.IOBase)
            self.assertTrue(read_file.readable())
            self.assertFalse(read_file.writable())
            self.assertFalse(read_file.closed)
            data = read_file.read()
        self.assertEqual(data, b"\0\1\2")
        self.assertTrue(read_file.closed)
        # Check disallow text mode
        with self.assertRaises(ValueError):
            with self.fs.openbin("file.bin", "rt") as read_file:
                pass
        # Check errors with
        # NOTE(review): source truncated here -- the rest of this method lies
        # outside this chunk.
    # NOTE(review): auto-generated Blender UI stub definitions (fake-bpy style:
    # empty docstrings and `pass` bodies only). The methods below are the tail
    # of a menu class whose header lies outside this chunk.

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the Object > Constraints menu.
class VIEW3D_MT_object_constraints(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, _context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the Object context menu.
class VIEW3D_MT_object_context_menu(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the Object > Convert menu.
class VIEW3D_MT_object_convert(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the object-mode pie menu.
class VIEW3D_MT_object_mode_pie(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, _context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the Object > Parent menu.
class VIEW3D_MT_object_parent(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, _context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub for the Object > Quick Effects menu.
class VIEW3D_MT_object_quick_effects(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
    ''' '''
    bl_rna = None
    ''' '''
    id_data = None
    ''' '''

    def append(self, draw_func):
        ''' '''
        pass

    def as_pointer(self):
        ''' '''
        pass

    def bl_rna_get_subclass(self):
        ''' '''
        pass

    def bl_rna_get_subclass_py(self):
        ''' '''
        pass

    def draw(self, _context):
        ''' '''
        pass

    def draw_collapsible(self, context, layout):
        ''' '''
        pass

    def draw_preset(self, _context):
        ''' '''
        pass

    def driver_add(self):
        ''' '''
        pass

    def driver_remove(self):
        ''' '''
        pass

    def get(self):
        ''' '''
        pass

    def is_extended(self):
        ''' '''
        pass

    def is_property_hidden(self):
        ''' '''
        pass

    def is_property_overridable_library(self):
        ''' '''
        pass

    def is_property_readonly(self):
        ''' '''
        pass

    def is_property_set(self):
        ''' '''
        pass

    def items(self):
        ''' '''
        pass

    def keyframe_delete(self):
        ''' '''
        pass

    def keyframe_insert(self):
        ''' '''
        pass

    def keys(self):
        ''' '''
        pass

    def path_from_id(self):
        ''' '''
        pass

    def path_menu(self, searchpaths, operator, props_default, prop_filepath,
                  filter_ext, filter_path, display_name, add_operator):
        ''' '''
        pass

    def path_resolve(self):
        ''' '''
        pass

    def pop(self):
        ''' '''
        pass

    def prepend(self, draw_func):
        ''' '''
        pass

    def property_overridable_library_set(self):
        ''' '''
        pass

    def property_unset(self):
        ''' '''
        pass

    def remove(self, draw_func):
        ''' '''
        pass

    def type_recast(self):
        ''' '''
        pass

    def values(self):
        ''' '''
        pass


# Generated stub; NOTE(review): source truncated here -- the body of this
# class lies outside this chunk.
class VIEW3D_MT_object_relations(bpy_types.Menu, bpy_types._GenericUI):
    bl_label = None
import numpy as np import cv2 import matplotlib.pyplot as plt import matplotlib.image as mpimg import pickle from combined_thresh import combined_thresh from perspective_transform import perspective_transform def line_fit(binary_warped, T): """ Find and fit lane lines """ # Assuming you have created a warped binary image called "binary_warped" # Take a histogram of the bottom half of the image # 假设你已经创建了一个名为“binary_warped”的变形二进制图像,获取图像下半部分的直方图 # axis=0 按列计算 img_roi_y = 700 # [1]设置ROI区域的左上角的起点 img_roi_x = 0 img_roi_height = binary_warped.shape[0] # [2]设置ROI区域的高度 img_roi_width = binary_warped.shape[1] # [3]设置ROI区域的宽度 img_roi = binary_warped[img_roi_y:img_roi_height, img_roi_x:img_roi_width] # cv2.imshow('img_roi', img_roi) histogram = np.sum(img_roi[0 :, :], axis=0) # histogram = np.sum(img_roi[int(np.floor(binary_warped.shape[0]*(1-T))):,:], axis=0) # plt.show() # Create an output image to draw on and visualize the result # 创建一个输出图像来绘制并可视化结果 out_img = (np.dstack((binary_warped, binary_warped, binary_warped))*255).astype('uint8') cv2.rectangle(out_img, (img_roi_x, img_roi_y), (img_roi_width, img_roi_height), (255, 0, 0), 5) # Find the peak of the left and right halves of the histogram # These will be the starting point for the left and right lines # 找出直方图左右两半的峰值 这些将成为左右线的起点 midpoint = np.int(histogram.shape[0]/2) leftx_base = np.argmax(histogram[100:midpoint]) + 100 rightx_base = np.argmax(histogram[midpoint:-100]) + midpoint # PMH:如果一边未检测到车道线,即无直方图峰值,则根据另一条车道线复制一个搜索起点 if (leftx_base == 100): leftx_base = np.argmax(histogram[midpoint:-100]) - midpoint if (rightx_base == midpoint): rightx_base = np.argmax(histogram[100:midpoint]) + midpoint # Choose the number of sliding windows 选择滑动窗口的数量 nwindows = 9 # Set height of windows # 设置窗口的高度 128 window_height = np.int(binary_warped.shape[0]/nwindows) # Identify the x and y positions of all nonzero pixels in the image # 确定图像中所有非零像素的x和y位置 nonzero = binary_warped.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = 
np.array(nonzero[1]) # Current positions to be updated for each window # 为每个窗口更新当前位置 leftx_current = leftx_base rightx_current = rightx_base leftx_current_last = leftx_base rightx_current_last = rightx_base leftx_current_next = leftx_base rightx_current_next = rightx_base # Set the width of the windows +/- margin # 设置窗口+/-边距的宽度 margin = 150 # Set minimum number of pixels found to recenter window # 设置发现到最近窗口的最小像素数 minpix = 50 # Create empty lists to receive left and right lane pixel indices # 创建空列表以接收左右车道像素索引 left_lane_inds = [] right_lane_inds = [] # plt.figure(2) # plt.subplot(2, 1, 1) # plt.plot(histogram) # Step through the windows one by one # 逐一浏览窗口 for window in range(nwindows-2): # Identify window boundaries in x and y (and right and left) # 确定x和y(以及右和左)的窗口边界 win_y_low = binary_warped.shape[0] - (window+1)*window_height win_y_high = binary_warped.shape[0] - window*window_height leftx_current = leftx_current_next rightx_current = rightx_current_next # 设置滑移窗口左右边界 win_xleft_low = leftx_current - margin win_xleft_high = leftx_current + margin win_xright_low = rightx_current - margin win_xright_high = rightx_current + margin # Draw the windows on the visualization image # 在可视化图像上绘制窗口 cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2) cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2) # plt.subplot(2, 1, 2) # plt.imshow(out_img, cmap='gray', vmin=0, vmax=1) # Identify the nonzero pixels in x and y within the window # 确定窗口内x和y的非零像素 good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] # Append these indices to the lists # 将这些索引附加到列表中 left_lane_inds.append(good_left_inds) right_lane_inds.append(good_right_inds) # If you found > minpix pixels, 
recenter next window on their mean position # 如果找到> minpix像素,请在其平均位置上重新调整下一个窗口 if len(good_left_inds) > minpix: leftx_current_next = np.int(np.mean(nonzerox[good_left_inds])) else: if window > 2: leftx_current_next = leftx_current + (leftx_current - leftx_current_last) # good_left_inds = int((win_y_low + win_y_high) / 2) * binary_warped.shape[0] + leftx_current # left_lane_inds.append(np.int64(good_left_inds)) # 20180516 pmh 加入方框中点作为拟合点 else: leftx_current_next = leftx_base if len(good_right_inds) > minpix: rightx_current_next = np.int(np.mean(nonzerox[good_right_inds])) else: if window > 2: rightx_current_next = rightx_current + (rightx_current - rightx_current_last) # right_lane_inds.append(good_right_inds) else: rightx_current_next = rightx_base leftx_current_last = leftx_current rightx_current_last = rightx_current # plt.figure(2) # plt.subplot(2, 1, 1) # plt.plot(histogram) # plt.subplot(2, 1, 2) # plt.imshow(out_img, cmap='gray', vmin=0, vmax=1) # cv2.imshow('out_img', out_img) # plt.savefig('D:/CIDI/data/L/line_fit_histo/') # plt.close() # save_file = '%s%06d%s' % ('D:/data/PNG20180206dataAllRectJPG/result1/', num_i+100000, 'Lr.jpg') # fig1 = plt.gcf() # fig1.set_size_inches(18.5, 10.5) # plt.savefig(save_file) # Concatenate the arrays of indices连接索引数组 left_lane_inds = np.concatenate(left_lane_inds) right_lane_inds = np.concatenate(right_lane_inds) # Extract left and right line pixel positions # 提取左右线像素位置 leftx = nonzerox[left_lane_inds] lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] ret = {} # 如果车道线非空,则进行拟合二次曲线 if (len(left_lane_inds) > 0) & (len(right_lane_inds) > 0): # Fit a second order polynomial to each left_fit = np.polyfit(lefty, leftx, 2) right_fit = np.polyfit(righty, rightx, 2) # Return a dict of relevant variables ret['left_fit'] = left_fit ret['right_fit'] = right_fit ret['nonzerox'] = nonzerox ret['nonzeroy'] = nonzeroy ret['out_img'] = out_img ret['left_lane_inds'] = left_lane_inds 
ret['right_lane_inds'] = right_lane_inds ret['histo'] = histogram return ret def tune_fit(binary_warped, left_fit, right_fit): """ Given a previously fit line, quickly try to find the line based on previous lines 给定一条先前合适的线条,快速尝试根据之前的线条找到线条 """ # Assume you now have a new warped binary image # from the next frame of video (also called "binary_warped") # It's now much easier to find line pixels! # 假设你现在有一个来自下一帧视频的新的变形二进制图像(也称为“binary_warped”)现在找到线像素更容易了! nonzero = binary_warped.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) margin = 100 left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin))) right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin))) # Again, extract left and right line pixel positions # 再次提取左右线像素位置 leftx = nonzerox[left_lane_inds] # 对一系列的bool变量返回 true 的 id 号 lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] # If we don't find enough relevant points, return all None (this means error) # 如果我们找不到足够的相关点,则返回全部无(这意味着错误) min_inds = 10 if lefty.shape[0] < min_inds or righty.shape[0] < min_inds: return None # Fit a second order polynomial to each # 为每个拟合一个二阶多项式 left_fit = np.polyfit(lefty, leftx, 2) right_fit = np.polyfit(righty, rightx, 2) # Generate x and y values for plotting ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] # Return a dict of relevant variables ret = {} ret['left_fit'] = left_fit ret['right_fit'] = right_fit ret['nonzerox'] = nonzerox ret['nonzeroy'] = nonzeroy ret['left_lane_inds'] = left_lane_inds ret['right_lane_inds'] = 
right_lane_inds return ret def viz1(binary_warped, ret, save_file=None): """ Visualize each sliding window location and predicted lane lines, on binary warped image save_file is a string representing where to save the image (if None, then just display) 在二值变形图像上显示每个滑动窗口位置和预测车道线save_file是一个字符串,表示图像的保存位置(如果为None,则仅显示) """ # Grab variables from ret dictionary left_fit = ret['left_fit'] right_fit = ret['right_fit'] nonzerox = ret['nonzerox'] nonzeroy = ret['nonzeroy'] out_img = ret['out_img'] left_lane_inds = ret['left_lane_inds'] right_lane_inds = ret['right_lane_inds'] # Generate x and y values for plotting ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] ) left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] plt.imshow(out_img) plt.plot(left_fitx, ploty, color='yellow') plt.plot(right_fitx, ploty, color='yellow') plt.xlim(0, 1280) plt.ylim(720, 0) if save_file is None: plt.show() else: plt.savefig(save_file) plt.gcf().clear() def viz2(binary_warped, ret, save_file=None): """ Visualize the predicted lane lines with margin, on binary warped image save_file is a string representing where to save the image (if None, then just display) 在二值变形图像上显示带边距的预测车道线save_file是表示图像保存位置的字符串(如果为None,则仅显示) """ # Grab variables from ret dictionary left_fit = ret['left_fit'] right_fit = ret['right_fit'] nonzerox = ret['nonzerox'] nonzeroy = ret['nonzeroy'] left_lane_inds = ret['left_lane_inds'] right_lane_inds = ret['right_lane_inds'] # Create an image to draw on and an image to show the selection window out_img = (np.dstack((binary_warped, binary_warped, binary_warped))*255).astype('uint8') window_img = np.zeros_like(out_img) # Color in left and right line pixels out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0] 
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255] # Generate x and y values for plotting ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0]) left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2] right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] # Generate a polygon to illustrate the search window area # And recast the x and y points into usable format for cv2.fillPoly() margin = 100 # NOTE: Keep this in sync with *_fit() left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))]) left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))]) left_line_pts = np.hstack((left_line_window1, left_line_window2)) right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))]) right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))]) right_line_pts = np.hstack((right_line_window1, right_line_window2)) # Draw the lane onto the warped blank image cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0)) cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0)) result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0) plt.imshow(result) plt.plot(left_fitx, ploty, color='yellow') plt.plot(right_fitx,
id = objid idstr = str("%05d" % id) self.pngs = glob.glob(self.dir+'*'+idstr+'*.png')+glob.glob(self.dir+'*'+idstr+'*.pdf') if len(self.pngs) == 0: sys.exit(' - Did not find any png files to open. Looked for '+ self.dir+'*'+idstr+'*.png --> ABORTING') self.file = self.pngs[0].split('/')[-1] # order the pngs to display if self.MASTfiles: G102_1D = [name for name in self.pngs if "g102_"+self.MASTversion+"_1d.png" in name] G102_2D = [name for name in self.pngs if "g102_"+self.MASTversion+"_2d.png" in name] G141_1D = [name for name in self.pngs if "g141_"+self.MASTversion+"_1d.png" in name] G141_2D = [name for name in self.pngs if "g141_"+self.MASTversion+"_2d.png" in name] G800_1D = [name for name in self.pngs if "g800l_"+self.MASTversion+"_1d.png" in name] G800_2D = [name for name in self.pngs if "g800l_"+self.MASTversion+"_2d.png" in name] else: G102_1D = [name for name in self.pngs if "G102.1D.png" in name] G102_2D = [name for name in self.pngs if "G102.2D.png" in name] G141_1D = [name for name in self.pngs if "G141.1D.png" in name] G141_2D = [name for name in self.pngs if "G141.2D.png" in name] G800_1D = [name for name in self.pngs if "G800L.1D.png" in name] G800_2D = [name for name in self.pngs if "G800L.2D.png" in name] zfit = [name for name in self.pngs if "zfit" in name] stack = [name for name in self.pngs if "stack" in name] mosaic = [name for name in self.pngs if "mosaic" in name] pngorderedlist = G102_1D + G102_2D + G141_1D + G141_2D + G800_1D + G800_2D + zfit + stack + mosaic remaining = list(set(self.pngs) - set(pngorderedlist)) # get files not accounted for above pngorderedlist = pngorderedlist #+ remaining self.plat = sys.platform if self.plat == 'darwin': import platform macversion = platform.mac_ver()[0] if float(macversion.split('.')[1]) > 6: # check if "open -F" is available (mac OS X 10.7.0 and above) opencmd = 'open -n -F '+' '.join(pngorderedlist) else: opencmd = 'open -n '+' '.join(pngorderedlist) elif self.plat == 'linux2' or 'Linux': opencmd = 
'gthumb '+' '.join(pngorderedlist)+' &' # Update the in-GUI image self.GUIimage = None for png in self.pngs: if (self.inGUIimage == 'zfit') & ('zfitplot.png' in png): self.GUIimage = png if (self.inGUIimage == 'G102stack') & \ (('G102_stack.png' in png) or ('g102_'+self.MASTversion+'_2dstack.png' in png)): self.GUIimage = png if (self.inGUIimage == 'G141stack') & \ (('G141_stack.png' in png) or ('g141_'+self.MASTversion+'_2dstack.png' in png)): self.GUIimage = png if self.GUIimage == None: # if requested image not found for object use first png figure instead self.GUIimage = pngorderedlist[0] # Getting number of PAs for current object if self.MASTfiles: searchext = '_1d.png' else: searchext = '.1D.png' twodpng = glob.glob(self.dir+'*'+idstr+'*'+searchext) self.PAs = np.zeros(len(twodpng)) for ii in xrange(len(self.PAs)): if self.MASTfiles: namesplit = os.path.basename(twodpng[ii]).split('-pa') self.PAs[ii] = namesplit[-1][:3] else: namesplit = os.path.basename(twodpng[ii]).split('-') self.PAs[ii] = int(namesplit[1]) if namesplit[0] in ['MACS0416.1','MACS2129.4','RXJ1347.5']: # case of names with negative dec self.PAs[ii] = int(namesplit[2]) self.PAs = np.sort(np.unique(self.PAs)) # Make sure the PAs are sorted self.Npa = len(self.PAs) self.pPNG = subprocess.Popen(opencmd,shell=True,executable=os.environ["SHELL"]) time.sleep(1.1)# sleep to make sure png appear in PIDlist if self.plat == 'darwin': self.pngPID = vi.getPID('Preview.app',verbose=False) # get PID of png process elif self.plat == 'linux2' or 'Linux': self.pngPID = vi.getPID('gthumb',verbose=False) # get PID of png process # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def openfits_but(self,position): """ Button to open fits files """ self.fitsb = Button(self) self.fitsb["text"] = "(0) Open fits files" if self.xpa: self.fitsb["command"] = self.openfits_but_cmd_xpa else: self.fitsb["command"] = self.openfits_but_cmd 
self.fitsb.grid(row=position[0],column=position[1],columnspan=position[2],sticky=W) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def openfits_but_cmd_xpa(self): """ Command for openfits button """ self.regiontemp = 'temp_ds9_forinspection.reg' idstr = str("%05d" % self.currentobj) lockstr = self.lockds9string() ds9cmd = ' ' if not self.ds9windowopen: ds9cmd = ds9cmd+'ds9 -geometry 1200x600 -scale zscale '+\ lockstr+' -tile grid layout 4 '+str(2*int(self.Npamax)) self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ["SHELL"]) time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process self.ds9windowopen = True time.sleep(1.0) for ii in np.arange(1,17): out = commands.getoutput('xpaset -p ds9 frame new') out = commands.getoutput('xpaset -p ds9 tile') Fstart = 1 for PA in self.PAs: PAstr = '-'+str("%03d" % int(PA))+'-' if self.MASTfiles: searchexpression = self.dir+'*'+idstr+'*-pa'+PAstr[1:-1]+'_*2d.fits' else: searchexpression = self.dir+'*'+PAstr+'*'+idstr+'*2D.fits' fits_2D = glob.glob(searchexpression) for ii in xrange(len(fits_2D)): # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart)) regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg') self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile) out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[DSCI]') out = commands.getoutput('xpaset -p ds9 regions '+regionfile) Fstart += 1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart)) regionfile = self.regiontemp.replace('.reg',PAstr+'SCI.reg') self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile) out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[SCI]') out = commands.getoutput('xpaset -p ds9 regions '+regionfile) Fstart += 1 # - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart)) regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg') self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile) out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[CONTAM]') out = commands.getoutput('xpaset -p ds9 regions '+regionfile) Fstart += 1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart)) regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg') self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile) contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contam. subtracted spectrum out = commands.getoutput('xpaset -p ds9 file '+contamsub) out = commands.getoutput('xpaset -p ds9 regions '+regionfile) # If a sextractor region file for the SCI-CONTAM image exists, show it. sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg' if os.path.exists(sexregion): out = commands.getoutput('xpaset -p ds9 regions '+sexregion) Fstart += 1 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - def openfits_but_cmd(self): """ Command for openfits button """ self.ds9open = True self.regiontemp = 'temp_ds9_forinspection.reg' idstr = str("%05d" % self.currentobj) lockstr = self.lockds9string() ds9cmd = 'ds9 -geometry 1200x600 -scale zscale '+lockstr+' -tile grid layout 4 '+str(2*int(self.Npa)) for PA in self.PAs: PAstr = '-'+str("%03d" % int(PA))+'-' if self.MASTfiles: searchext = '2d.fits' else: searchext = '2D.fits' fits_2D = glob.glob(self.dir+'*'+PAstr+'*'+idstr+'*'+searchext) for ii in xrange(len(fits_2D)): regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg') self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile) ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[DSCI]" -region '+regionfile+' ' regionfile = 
self.regiontemp.replace('.reg',PAstr+'SCI.reg') self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile) ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[SCI]" -region '+regionfile+' ' regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg') self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile) ds9cmd = ds9cmd+' "'+fits_2D[ii]+'[CONTAM]" -region '+regionfile+' ' regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg') self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile) contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contamination subtracted spectrum ds9cmd = ds9cmd+' "'+contamsub+'" -region '+regionfile+' ' # If a sextractor region file for the SCI-CONTAM image exists, show it. sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg' if os.path.exists(sexregion): ds9cmd = ds9cmd+' -region '+sexregion+' ' self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ["SHELL"]) time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
the same level as the module # we pass our default here super(Nodz, self).__init__(parent, configPath=self.BASE_CONFIG_PATH) self.initialize_configuration() self.config = self.configuration_data self._rename_field = RenameField(self) self._search_field = SearchField(self) self._creation_field = SearchField(self) self._context = GraphContext(self) self._attribute_context = AttributeContext(self) self._backdrop_context = BackdropContext(self) # patching original signals self.signal_plug_connected = self.signal_PlugConnected self.signal_plug_disconnected = self.signal_PlugDisconnected self.signal_socket_connected = self.signal_SocketConnected self.signal_socket_disconnected = self.signal_SocketDisconnected # test self.selected_nodes = [] @property def rename_field(self): return self._rename_field @property def search_field(self): """ holds the search field widget Returns: SearchField instance """ return self._search_field @property def creation_field(self): """ holds the creation field widgets Returns: SearchField instance """ return self._creation_field @property def context(self): """ holds the creation field widgets Returns: GraphContext instance """ return self._context @property def attribute_context(self): """ holds the attribute field widgets Returns: AttributeContext instance """ return self._attribute_context @property def backdrop_context(self): """ holds the backdrop context widget Returns: AttributeContext instance """ return self._backdrop_context def keyPressEvent(self, event): """ overrides the keyPressEvent We are adding more key press events here Args: event: Returns: """ if event.key() not in self.pressedKeys: self.pressedKeys.append(event.key()) if event.key() == Qt.QtCore.Qt.Key_Delete: self._deleteSelectedNodes() if (event.key() == Qt.QtCore.Qt.Key_F and event.modifiers() == Qt.QtCore.Qt.NoModifier): self._focus() if (event.key() == Qt.QtCore.Qt.Key_S and event.modifiers() == Qt.QtCore.Qt.NoModifier): self._nodeSnap = True if event.key() == 
Qt.QtCore.Qt.Key_Tab: self.signal_creation_field_request.emit() if (event.key() == Qt.QtCore.Qt.Key_F and event.modifiers() == Qt.QtCore.Qt.ControlModifier): self.signal_search_field_request.emit() if event.key() == Qt.QtCore.Qt.Key_L: self.signal_layout_request.emit() if event.key() == Qt.QtCore.Qt.Key_R: self.signal_rename_field_request.emit() # Emit signal. self.signal_KeyPressed.emit(event.key()) super(Nodz, self).keyPressEvent(event) def mousePressEvent(self, event): """ extends the mousePressEvent We are adding a context widget request on RMB click here Args: event: Returns: """ if not self.scene().itemAt(self.mapToScene(event.pos()), Qt.QtGui.QTransform()): if event.button() == Qt.QtCore.Qt.RightButton and event.modifiers() == Qt.QtCore.Qt.NoModifier: self.signal_context_request.emit(self.scene().itemAt(self.mapToScene(event.pos()), Qt.QtGui.QTransform())) super(Nodz, self).mousePressEvent(event) def _deleteSelectedNodes(self): """ overrides original method Let us emit a signal on nodes deletion Returns: """ self.signal_nodes_deleted.emit([_ for _ in self.scene().selectedItems() if isinstance(_, NodeItem)]) super(Nodz, self)._deleteSelectedNodes() def _deleteSelectedNodes(self): """ overrides original method Let us emit a signal on nodes deletion Returns: """ for node in self.scene().selectedItems(): node._remove() # Emit signal. 
self.signal_nodes_deleted.emit([_ for _ in self.scene().selectedItems() if isinstance(_, NodeItem)]) def retrieve_creation_position(self): """ retrieves the position where something should be created Depending on the configuration we define where to place nodes/backdrops/dots etc Returns: QPointF """ if self.configuration.node_placement == "cursor": position = Qt.QtCore.QPointF(self.mapToScene(self.mapFromGlobal(Qt.QtGui.QCursor.pos()))) elif self.configuration.node_placement == "creation_field": position = Qt.QtCore.QPointF(self.mapToScene(self.mapFromGlobal(self.creation_field.pos()))) else: position = None return position def create_node(self, name, position=None, alternate=False, node_type="default"): """ wrapper around Nodz.createNode() to extend behavior Args: name: node name position: if unset it will calculate the position based on the configuration alternate: The attribute color alternate state, if True, every 2 attribute the color will be slightly darker node_type: node type Returns: NodeItem instance """ if node_type == "backdrop": self.create_backdrop() else: if not position: position = self.retrieve_creation_position() _ = "node_{0}".format(node_type) if hasattr(self.configuration, _): # and create node with included preset node = self.createNode(name, _, position=position, alternate=alternate) else: LOG.info("Node preset for type {0} not configured.".format(node_type)) node = self.createNode(name, position=position, alternate=alternate) node.node_type = node_type self.signal_node_created.emit(node) return node def createNode(self, name="default", preset="node_default", position=None, alternate=False): """ overrides the createNode method Args: name: preset: position: alternate: Returns: """ nodeItem = NodeItem(name=name, alternate=alternate, preset=preset, config=self.configuration_data) nodeItem.signal_context_request.connect(self.on_context_request) nodeItem.signal_plug_created.connect(self.on_plug_created) 
nodeItem.signal_socket_created.connect(self.on_socket_created) # Store node in scene. self.scene().nodes[name] = nodeItem if not position: # Get the center of the view. position = self.mapToScene(self.viewport().rect().center()) # Set node position. self.scene().addItem(nodeItem) nodeItem.setPos(position - nodeItem.nodeCenter) # Emit signal. self.signal_NodeCreated.emit(name) return nodeItem def delete_node(self, name): raise NotImplementedError def rename_node(self, node, new_name): """ gives specified node a new name Args: node: NodeItem instance new_name: new name Returns: """ old_name = node.name if old_name != new_name: self.editNode(node, new_name) self.signal_node_name_changed.emit(node, old_name, new_name) def create_backdrop(self): pass def apply_data_type_color_to_connection(self, connection): """ takes and applies the data type color to the connection Args: connection: ConnectionItemInstance Returns: """ # set color based on data_type if self.configuration.connection_inherit_datatype_color: if connection.plugItem: expected_config = "datatype_{0}".format(connection.plugItem.dataType) else: expected_config = "datatype_{0}".format(connection.socketItem.dataType) if hasattr(self.configuration, expected_config): color = nodz_utils._convertDataToColor(self.configuration_data[expected_config]["plug"]) connection._pen = color def connect_attributes(self, plug, socket): """ creates a new ConnectionItem instance that connects plug and socket Args: plug: socket: Returns: """ connection = self.createConnection(plug, socket) return connection def createConnection(self, plug, socket): """ extends the createConnection We are adding the possibility to apply data type color to the connection here Args: plug: socket: Returns: """ connection = ConnectionItem(plug.center(), socket.center(), plug, socket) connection.plugNode = plug.parentItem().name connection.plugAttr = plug.attribute connection.plugItem = plug connection.socketNode = socket.parentItem().name 
connection.socketAttr = socket.attribute connection.socketItem = socket plug.connect(socket, connection) socket.connect(plug, connection) # let us apply the corresponding data type color self.apply_data_type_color_to_connection(connection) self.scene().addItem(connection) connection.updatePath() return connection def get_shared_connection(self, plug, socket): """ finds the shared connection item Args: plug: PlugItem instance socket: SocketItem instance Returns: ConnectionItem instance """ all_connections = plug.connections + socket.connections shared_connections = list(set(all_connections)) if shared_connections: if len(shared_connections) != 1: LOG.error("Multiple shared connections on plug '{0}' and socket '{1}'".format(plug, socket)) else: return shared_connections[0] def disconnect_attributes(self, plug, socket): """ removes a shared connection Args: plug: PlugItem instance socket: SocketItem instance Returns: """ connection = self.get_shared_connection(plug, socket) if connection: connection._remove() def on_context_request(self, node_item): """ placeholder method, has to be overriden in Nodegraph class Args: node_item: Returns: """ pass def on_plug_created(self, plug_item): pass def on_socket_created(self, socket_item): pass def on_selected(self, node): pass def layout_nodes(self, node_names=None): """ rearranges node positions Notes: Adopted from implementation of user https://github.com/glm-ypinczon Args: node_names: expects a list of node names otherwise it will consider all available nodes Returns: """ node_width = 300 # default value, will be replaced by node.baseWidth + margin when iterating on the first node margin = self.configuration.layout_margin_size scene_nodes = self.scene().nodes.keys() if not node_names: node_names = scene_nodes root_nodes = [] already_placed_nodes = [] # root nodes (without connection on the plug) for node_name in node_names: node = self.scene().nodes[node_name] if node is not None: node_width = node.baseWidth + margin is_root 
= True for plug in node.plugs.values(): is_root &= (len(plug.connections) == 0) if is_root: root_nodes.append(node) max_graph_width = 0 root_graphs = [] for root_node in root_nodes: root_graph = list() root_graph.append([root_node]) current_graph_level = 0 do_next_graph_level = True while do_next_graph_level: do_next_graph_level = False for _node in range(len(root_graph[current_graph_level])): node = root_graph[current_graph_level][_node] for attr in node.attrs: if attr in node.sockets: socket = node.sockets[attr] for connection in socket.connections: if len(root_graph) <= current_graph_level + 1: root_graph.append(list()) root_graph[current_graph_level + 1].append(connection.plugItem.parentItem()) do_next_graph_level = True current_graph_level += 1 graph_width = len(root_graph) * (node_width + margin) max_graph_width = max(graph_width, max_graph_width) root_graphs.append(root_graph) # update scne rect if needed if max_graph_width > self.scene().width(): scene_rect = self.scene().sceneRect() scene_rect.setWidth(max_graph_width) self.scene().setSceneRect(scene_rect) base_ypos = margin for root_graph in root_graphs: # set positions... 
# middle of the view current_xpos = max(0, 0.5 * (self.scene().width() - max_graph_width)) + max_graph_width - node_width next_base_ypos = base_ypos for nodes_at_level in root_graph: current_ypos = base_ypos for node in nodes_at_level: if len(node.plugs) > 0: if len(node.plugs.values()[0].connections) > 0: parent_pos = node.plugs.values()[0].connections[0].socketItem.parentItem().pos() current_xpos = parent_pos.x() - node_width if (node not in already_placed_nodes) and (node.name in node_names): already_placed_nodes.append(node) node_pos = Qt.QtCore.QPointF(current_xpos, current_ypos) node.setPos(node_pos) current_ypos += node.height + margin next_base_ypos = max(next_base_ypos, current_ypos) current_xpos -= node_width base_ypos = next_base_ypos self.scene().updateScene() def get_node_by_name(self, node_name): """ placeholder method, has to be overriden in Nodegraph class Args: node_name: Returns: """ pass def _returnSelection(self): """ overrides the default selection behavior We will emit all selected items in the scene and not only selected NodeItem names Returns: """ self.signal_selection_changed.emit(self.scene().selectedItems()) class Nodegraph(Basegraph): """ main Nodegraph that should be accessable without any host application """ def __init__(self, parent=None): super(Nodegraph, self).__init__(parent=parent) # this can be overriden in subclasses to allow mixing in other classes # that are not host agnostic self._window = BaseWindow(parent) self._events = Events() # create the graphingscene self._graph = Nodz(self._window) # apply logging verbosity _COCONODZ_LOG.setLevel(self.configuration.output_verbosity.upper()) self.graph.initialize() # to query nodes our own way self._all_nodes = self.graph.scene().nodes # add the graph to our window self.window.central_layout.addWidget(self.graph) # appending reserved nodetypes for node_type in self.RESERVED_NODETYPES: self.configuration.available_node_types.append(node_type) self.creation_field.available_items = 
self.configuration.available_node_types # patching self.graph.on_context_request = self.on_context_request self.graph.on_plug_created = self.on_plug_created self.graph.on_socket_created = self.on_socket_created self.graph.create_backdrop = self.create_backdrop self.graph.delete_node = self._delete_node self.graph.get_node_by_name = self.get_node_by_name nodz_main.connection_holder = ConnectionItem self.register_events() @property def window(self): """ holds the Window which serves as parent to all other widgets Returns: """ return self._window @window.setter def window(self, window): """ holds the Window which serves as parent to all
# <reponame>pickxiguapi/MiniC-Compiler  -- stray dataset tag, commented out so the module parses
import os


class NewT():
    """A compiler temporary variable named T0, T1, T2, ...

    Numbering uses the module-level counter ``newT_num``; the counter must be
    initialised before the first instantiation (``Pro.createSymbolList`` below
    resets it to 0).
    """

    def __init__(self, value):
        global newT_num
        self.value = value
        self.name = 'T' + str(newT_num)
        newT_num += 1

    def __str__(self):
        return self.name

    def __repr__(self):
        return '\nname:{:10}value:{:5}'.format(self.name, self.value)

    def isdigit(self):
        # Always False: presumably mirrors str.isdigit() so a temporary can be
        # probed with the same test as a literal token string -- TODO confirm
        # against the interpreter side.
        return False


class label():
    """A (possibly forward-declared) jump target; ``value`` holds a quadruple index."""

    def __init__(self, value=None):
        self.value = value

    def __repr__(self):
        return str(self.value)

    # str() and repr() are intentionally identical.
    __str__ = __repr__


class Sequence():
    """A quadruple (action, operand1, operand2, result); '_' marks an unused slot."""

    def __init__(self, action, p1='_', p2='_', result='_'):
        self.action = action
        self.p1 = p1
        self.p2 = p2
        self.result = result

    def __str__(self):
        return '{:5}{:10}{:10}{:10}'.format(str(self.action), str(self.p1),
                                            str(self.p2), str(self.result))

    # repr() output is intentionally the same fixed-width row as str().
    __repr__ = __str__


# Terminal-symbol -> token-type lookup used by element.  The grammar tables
# encode every token as a single character; this maps that character back to
# its readable type name.  Built (inverted) once at import time instead of
# rebuilding and inverting the 35-entry dict on every element() construction.
_TYPE_BY_SYMBOL = {sym: tok for tok, sym in {
    'NUM': 'a', 'ID': 'b', 'if': 'c', 'else': 'd', 'for': 'e', 'while': 'f',
    'int': 'g', 'write': 'h', 'read': 'i', '(': '(', ')': ')', ';': ';',
    '{': '{', '}': '}', ',': ',', '+': '+', '-': '-', '*': '*', '/': '/',
    '=': '=', '>': '>', '<': '<', '>=': 'w', '<=': 'x', '!=': 'y',
    '==': 'z', '++': '1', '--': '2', '#': '#', 'main': '3', 'return': '4',
    '&&': '5', '||': '6', 'char': 's', 'float': 'r'}.items()}


class element():
    """A lexical token: terminal symbol, source text, line number and type name."""

    def __init__(self, symbol, value, line):
        self.symbol = symbol
        self.value = value
        self.line = line
        self.type = _TYPE_BY_SYMBOL[symbol]

    def __str__(self):
        return '\n符号:' + self.symbol + '\t值:' + self.value + '\t行数:' + str(self.line) + '\t类型:' + self.type

    # repr() output is intentionally identical to str().
    __repr__ = __str__


class MyException(Exception):
    """Reports a compile error: prints it and appends it to Error.txt.

    Instances are created purely for these side effects (see ``Pro._err``);
    the exception itself is never raised.
    """

    def __init__(self, line, type, content):
        Exception.__init__(self)
        error = '{}ERROR Line:{} {}'.format(type, line, content)
        print(error)
        with open('Error.txt', 'a+') as f:
            f.write('{}\n'.format(error))


class Pro():
    """Syntax analyser and error-analysis driver."""

    def __init__(self):
        pass  # behaviour-neutral; the original initialisation body continues below
self.flag = False if os.path.exists('Error.txt'): os.remove('Error.txt') def _err(self, line, type, content): MyException(line, type, content) self.flag = True def __readRules(self, filename): with open(filename, encoding='UTF-8') as f: # 去掉最后的 '\n' ter_list = f.readline()[:-1].split('\t') dic = dict() for line in f.readlines(): line_list = line[:-1].split('\t') line_dic = dict() for index, rule in enumerate(line_list): if rule != '' and index != 0: line_dic[ter_list[index]] = rule dic[line_list[0]] = line_dic return dic def createSymbolList(self, input_str, map_list, map_line): # 建立符号表 # 词法分析给出的所有元素 self.list = [] for i, ch in enumerate(input_str): self.list.append(element(ch, map_list[i], map_line[i])) # 符号表 self.chart = dict() # 函数名表 self.function = dict() # 四元式表 self.seq_list = list() self.seq_num = 0 self.seq_index = 0 self.val = 0 global newT_num newT_num = 0 # 临时变量表 self.temp_list = list() def analysis(self, filename): # 读取规则获得规则表 # 这里打开的是LL1.txt,为了获取规则 # 大致读取内容如下: {'F': {'f': 'F->f(M)D'}, 'H': {'h': 'H->hN;'}, '9': {')': '9->@', ',': '9->,V'}, 'I': {'i': 'I->ib;'}, 'J': {'{': 'J->{C}'} self.rule = self.__readRules(filename) # self.ch为当前的字符 self.ch = self.list.pop(0) # Z开始文法 self._Z() def FIRST(self, symbol, ch): # ch在不在symbol的first集 if ch in self.rule[symbol]: return True return False def FOLLOW(self, symbol, ch): # ch在不在symbol的follow集 if ch in self.rule[symbol] and self.rule[symbol][ch].split('->')[1] == '@': return True return False def getNextch(self): self.ch = self.list.pop(0) def _Write(self): if os.path.exists('Sequence.txt'): os.remove('Sequence.txt') with open('Sequence.txt', 'a+') as f: for i, seq in enumerate(self.seq_list): f.write('{:2}[{}]\n'.format(i, seq)) if os.path.exists('Parameters.txt'): os.remove('Parameters.txt') with open('Parameters.txt', 'a+') as f: for i in self.chart.keys(): f.write('name:{:2} value:0\n'.format(i)) for i in self.temp_list: f.write('name:{:2} value:0\n'.format(i.name)) if os.path.exists('seq_index.txt'): 
os.remove('seq_index.txt') with open('seq_index.txt', 'w') as f: f.write(str(self.seq_index)) def _op(self, op, p1, p2): ''' :param op: 运算符 :param p1: 运算数1 :param p2: 运算数2 :return: ''' if op == '+': return p1 + p2 elif op == '-': return p1 - p2 elif op == '*': return p1 * p2 elif op == '/': return p1 // p2 elif op == '>': if p1 > p2: return 1 return 0 elif op == '<': if p1 < p2: return 1 return 0 elif op == '==': if p1 == p2: return 1 return 0 elif op == '>=': if p1 >= p2: return 1 return 0 elif op == '<=': if p1 <= p2: return 1 return 0 elif op == '!=': if p1 != p2: return 1 return 0 elif op == '&&': if p1 == 1 and p2 == 1: return 1 return 0 elif op == '||': if p1 == 0 and p2 == 0: return 0 return 1 def __VALUE(self, op, p1, p2): p1_t = 0 p2_t = 0 t0 = 0 if isinstance(p1, NewT): p1_t = p1.value elif isinstance(p1, int): p1_t = p1 else: p1_t = self.chart[p1] if isinstance(p2, NewT): p2_t = p2.value elif isinstance(p2, int): p2_t = p2 else: p2_t = self.chart[p2] if isinstance(p1, int) and isinstance(p2, int): t0 = self._op(op, p1_t, p2_t) else: t0 = NewT(self._op(op, p1_t, p2_t)) self.temp_list.append(t0) self.seq_list.append(Sequence(action=op, p1=p1, p2=p2, result=t0)) self.seq_num += 1 return t0 def _Z(self): if self.ch.symbol == 'g': self.getNextch() self._Y() print('') self._Write() else: # 抛出缺少int self._err(self.ch.line, 'TYPE-', 'Wrong type define') # line type content def _Y(self): f_name = self.ch.value self.function[f_name] = label() self.function[f_name + "_main"] = label() if self.ch.symbol == 'b': self.getNextch() if self.ch.symbol == '(': self.getNextch() self._X() if self.ch.symbol == ')': self.getNextch() self.function[f_name].value = self.seq_num self._S() self.seq_list.append(Sequence(action='j', result=self.function[f_name + "_main"])) self.seq_num += 1 self.seq_index = self.seq_num self.getNextch() if self.ch.symbol == 'g': self.getNextch() self._Y() else: self._err(self.ch.line, 'DECLARATION-', 'Incorrect function / variable declaration') 
print('1') # 不正确的函数定义 else: self._err(self.ch.line, 'LOST-', 'Lost )') # 缺少 ) else: self._err(self.ch.line, 'LOST-', 'Lost (') # 缺少( elif self.ch.symbol == '3': # 主函数 self.getNextch() if self.ch.symbol == '(': self.getNextch() if self.ch.symbol == ')': self.getNextch() self._S() else: self._err(self.ch.line, 'LOST-', 'Lost )') # print("缺少)") else: self._err(self.ch.line, 'LOST-', 'Lost )') # print("缺少(") else: # 错误的变量名或函数定义 self._err(self.ch.line, 'DECLARATION-', 'Incorrect function / variable declaration') if(self.ch.symbol!='('): self.getNextch() if self.ch.symbol == '(': self.getNextch() self._X() if self.ch.symbol == ')': self.getNextch() self.function[f_name].value = self.seq_num self._S() self.seq_list.append(Sequence(action='j', result=self.function[f_name + "_main"])) self.seq_num += 1 self.seq_index = self.seq_num self.getNextch() if self.ch.symbol == 'g': self.getNextch() self._Y() else: self._err(self.ch.line, 'DECLARATION-', 'Incorrect function / variable declaration') print('1') # 不正确的函数定义 else: self._err(self.ch.line, 'LOST-', 'Lost )') # 缺少 ) else: self._err(3, 'GRAMMAR-', 'Undefined variable name used') # 缺少( def _X(self): if self.ch.symbol == 'g': self.getNextch() name = self.ch.value if self.ch.symbol == 'b': self.chart[name] = 0 # 应该是传过来的参数值 self.getNextch() self._W() else: self._err(self.ch.line, 'DECLARATION-', 'Incorrect function / variable declaration') print('3') else: self._err(self.ch.line, 'TYPE-', 'Wrong type define') def _W(self): if self.ch.symbol == ',': self.getNextch() self._X() elif self.FOLLOW('W', self.ch.symbol): return def _V(self): if self.ch.symbol == 'b': self.getNextch() self._9() else: self._err(self.ch.line, 'DECLARATION-', 'Incorrect function / variable declaration') print('4') def _9(self): if self.ch.symbol == ',': self.getNextch() self._V() elif self.FOLLOW('9', self.ch.symbol): return def _S(self): print(self.ch) if self.ch.symbol == '{': self.getNextch() self._A() self._C() if self.ch.symbol == '4': self.getNextch() 
if self.ch.symbol == 'b': self.val = self.ch.value self.getNextch() if self.ch.symbol == ';': self.getNextch() if self.ch.symbol == '}': # 栈空,不读取下一个字符 pass else: # print("lose }") self._err(self.ch.line, 'LOST-', 'Lost }') else: # print("lose ;") self._err(self.ch.line, 'LOST-', 'Lost ;') else: # print("lose variable") self._err(self.ch.line, 'TYPE-', 'Wrong return type') else: # print("lose return") self._err(self.ch.line, 'LOST-', 'Lost return') elif self.FOLLOW('S', self.ch.symbol): return else: # print("lose {") self._err(self.ch.line, 'LOST-', 'Lost {') def _A(self): # First if self.FIRST('B', self.ch.symbol): self._B() self._A() elif self.FOLLOW('A', self.ch.symbol): return else: self._err(self.ch.line, 'TYPE-', 'Wrong type declaration') def _B(self): if self.ch.symbol == 'g': self.getNextch() if self.ch.symbol == 'b': # 获得名字添加符号表 name = self.ch.value self.getNextch() if self.ch.symbol == ';': self.chart[name] = 0 self.getNextch() else: self._err(self.ch.line, 'LOST-', 'Lost ;') else: self._err(self.ch.line, 'DECLARATION-', 'Wrong type declaration') else: self._err(self.ch.line, 'DECLARATION-', 'Wrong type declaration') def _C(self): if self.FIRST('D', self.ch.symbol): self._D() self._C() elif self.FOLLOW('C', self.ch.symbol): return else: self._err(self.ch.line, 'GRAMMAR-', 'Wrong Expression') def _D(self): if self.FIRST('E', self.ch.symbol): self._E() elif self.FIRST('F', self.ch.symbol): self._F() elif self.FIRST('G', self.ch.symbol): self._G() elif self.FIRST('I', self.ch.symbol): self._I() elif self.FIRST('H', self.ch.symbol): self._H() elif self.FIRST('J', self.ch.symbol): self._J() elif self.FIRST('L', self.ch.symbol): self._L() elif self.ch.symbol == ';': self.getNextch() else: self._err(self.ch.line, 'GRAMMAR-', 'Wrong Expression') def _E(self): if self.ch.symbol == 'c': self.getNextch() if self.ch.symbol == '(': self.getNextch() r = self._M() if self.ch.symbol == ')': label1 = label() label2 = label() self.seq_list.append(Sequence(action='j=', p1=0, 
p2=r, result=label1)) self.seq_num += 1 self.getNextch() self._D() self.seq_list.append(Sequence(action='j', result=label2)) self.seq_num += 1 label1.value = self.seq_num self._Q() label2.value = self.seq_num else: self._err(self.ch.line, 'LOST-', 'Lost )') else: self._err(self.ch.line, 'LOST-', 'Lost (') else: self._err(self.ch.line, 'GRAMMAR-', 'Wrong if expression') def _Q(self): if self.ch.symbol == 'd': self.getNextch() self._D() else: self._err(self.ch.line, 'GRAMMAR-', 'Wrong else expression') def _F(self): if self.ch.symbol == 'f': label1 = label() label2 = label() self.getNextch() label1.value
<filename>pysem/model_generalized_effects.py # -*- coding: utf-8 -*- """Generalized Random Effects SEM.""" from .utils import chol_inv, chol_inv2, cov from scipy.linalg import solve_sylvester from .model_means import ModelMeans from itertools import combinations from functools import partial from . import startingvalues from copy import deepcopy import pandas as pd import numpy as np class ModelGeneralizedEffects(ModelMeans): """ Generalized Random Effects model. Generalized Random Effects SEM is a generalization of ModelEffects in a sense, that it allows for an arbitrary number of random effects, and also it allows to introduce parametic covariance-between-observations marices. The latter can be thought of as in context of time-series or spatial data. """ def __init__(self, description: str, effects: tuple, mimic_lavaan=False, baseline=False, cov_diag=False, intercepts=True): """ Instantiate Generalized Random Effects SEM model. Parameters ---------- description : str Model description in semopy syntax. effects : tuple, EffectBase A list of Effects or a single effect. mimic_lavaan: bool If True, output variables are correlated and not conceptually identical to indicators. lavaan treats them that way, but it's less computationally effective. The default is False. baseline : bool If True, the model will be set to baseline model. Baseline model here is an independence model where all variables are considered to be independent with zero covariance. Only variances are estimated. The default is False. cov_diag : bool If cov_diag is True, then there are no covariances parametrised unless explicitly specified. The default is False. intercepts: bool If True, intercepts are also modeled. Intercept terms can be accessed via "1" symbol in a regression equation, i.e. x1 ~ 1. The default is False. Returns ------- None. 
""" if type(effects) not in (list, tuple): effects = (effects,) self.effects = effects self.symbs_rf = [f'~{i + 1}~' for i in range(len(effects))] matrices = list(self.matrices_names) for i, symb in enumerate(self.symbs_rf): name = f'd{i + 1}' setattr(self, f'build_{name}', self.build_d) setattr(self, f'start_{name}', startingvalues.start_d) matrices.append(name) f = partial(self.effect_rf_covariance, mx=name) self.dict_effects[symb] = f self.matrices_names = tuple(matrices) super().__init__(description, mimic_lavaan=False, baseline=baseline, cov_diag=cov_diag, intercepts=intercepts) self.objectives = {'FIML': (self.obj_matnorm, self.grad_matnorm)} def preprocess_effects(self, effects: dict): """ Run a routine just before effects are applied. Used to apply random effect variance Parameters ------- effects : dict Mapping opcode->lvalues->rvalues->multiplicator. Returns ------- None. """ super().preprocess_effects(effects) obs = self.vars['observed'] for i, effect in enumerate(self.effects): symb = self.symbs_rf[i] mode = effect.d_mode if mode in ('diag', 'full'): for v in obs: t = effects[symb][v] if v not in t: t[v] = None if mode == 'full': for a, b in combinations(obs, 2): t = effects[symb][a] tt = effects[symb][b] if (v not in t) and (v not in tt): t[b] = None else: if mode != 'scale': raise Exception(f'Unknown mode "{mode}".') param = f'paramD{i + 1}' for v in obs: t = effects[symb][v][v] = param def load(self, data, cov=None, clean_slate=False, n_samples=None, **kwargs): """ Load dataset. Parameters ---------- data : pd.DataFrame Data with columns as variables. cov : pd.DataFrame, optional Pre-computed covariance/correlation matrix. Used only for variance starting values. The default is None. clean_slate : bool, optional If True, resets parameters vector. The default is False. n_samples : int, optional Redunant for ModelEffects. The default is None. **kwargs : dict Extra arguments are sent to Effects. 
KeyError Rises when there are missing variables from the data. Exception Rises when group parameter is None. Returns ------- None. """ if data is None: if not hasattr(self, 'mx_data'): raise Exception("Data must be provided.") if clean_slate: self.prepare_params() return else: data = data.copy() obs = self.vars['observed'] exo = self.vars['observed_exogenous'] if self.intercepts: data['1'] = 1.0 cols = data.columns missing = (set(obs) | set(exo)) - set(set(cols)) if missing: t = ', '.join(missing) raise KeyError(f'Variables {t} are missing from data.') self.load_data(data, covariance=cov, **kwargs) self.load_starting_values() if clean_slate or not hasattr(self, 'param_vals'): self.prepare_params() def prepare_params(self): """ Prepare structures for effective optimization routines. Returns ------- None. """ super().prepare_params() extra = np.array([]) ranges = list() a = len(self.param_vals) for effect in self.effects: extra = np.append(extra, effect.parameters) b = a + len(effect.parameters) ranges.append((a, b)) a = b self.param_vals = np.append(self.param_vals, extra) self.effects_param_ranges = ranges def update_matrices(self, params: np.ndarray): """ Update all matrices from a parameter vector. Parameters ---------- params : np.ndarray Vector of parameters. Returns ------- None. """ super().update_matrices(params) for effect, (a, b) in zip(self.effects, self.effects_param_ranges): effect.parameters[:] = params[a:b] def build_d(self): """ D matrix is a covariance matrix for random effects across columns. Returns ------- np.ndarray Matrix. tuple Tuple of rownames and colnames. """ names = self.vars['observed'] n = len(names) mx = np.zeros((n, n)) return mx, (names, names) def effect_rf_covariance(self, items: dict, mx: str): """ Work through random effects covariance operation. Parameters ---------- items : dict Mapping lvalues->rvalues->multiplicator. mx : str Name of the D matrix. Returns ------- None. 
""" rows, cols = getattr(self, f'names_{mx}') mx = getattr(self, f'mx_{mx}') for lv, rvs in items.items(): for rv, mult in rvs.items(): name = None try: val = float(mult) active = False except (TypeError, ValueError): active = True if mult is not None: if mult != self.symb_starting_values: name = mult else: active = False val = None if name is None: self.n_param_cov += 1 name = '_c%s' % self.n_param_cov i, j = rows.index(lv), cols.index(rv) ind = (i, j) if i == j: bound = (0, None) symm = False else: if self.baseline: continue bound = (None, None) symm = True self.add_param(name, matrix=mx, indices=ind, start=val, active=active, symmetric=symm, bound=bound) def get_bounds(self): """ Get bound constraints on parameters. Returns ------- list List of tuples specifying bounds. """ b = super().get_bounds() for effect in self.effects: b.extend(effect.get_bounds()) return b def fit(self, data=None, cov=None, obj='ML', solver='SLSQP', groups=None, clean_slate=False, **kwargs): """ Fit model to data. Parameters ---------- data : pd.DataFrame, optional Data with columns as variables. The default is None. cov : pd.DataFrame, optional Pre-computed covariance/correlation matrix. The default is None. obj : str, optional Objective function to minimize. Possible values are 'REML', "ML". The default is 'ML'. solver : TYPE, optional Optimizaiton method. Currently scipy-only methods are available. The default is 'SLSQP'. groups : list, optional Groups of size > 1 to center across. The default is None. clean_slate : bool, optional If False, successive fits will be performed with previous results as starting values. If True, parameter vector is reset each time prior to optimization. The default is False. Raises ------ NotImplementedError Rises when unknown objective name is passed. Returns ------- SolverResult Information on optimization process. 
""" if obj == 'ML': res = super().fit(data=data, cov=cov, obj='ML', solver=solver, groups=groups, clean_slate=clean_slate, **kwargs) return res else: raise NotImplementedError(f"Unknown method {obj}.") ''' ---------------------Preparing structures for a more----------------------- ------------------------efficient computations----------------------------- ''' def load_data(self, data: pd.DataFrame, covariance=None, **kwargs): """ Load dataset from data matrix. Parameters ---------- data : pd.DataFrame Dataset with columns as variables and rows as observations. covariance : pd.DataFrame, optional Custom covariance matrix. The default is None. **kwargs : dict Extra arguments are sent to Effects. Returns ------- None. """ obs = self.vars['observed'] self.mx_g = data[self.vars['observed_exogenous']].values.T if len(self.mx_g.shape) != 2: self.mx_g = self.mx_g[np.newaxis, :] self.mx_data = data[obs].values.T self.n_obs, self.n_samples = self.mx_data.shape self.num_m = len(set(self.vars['observed']) - self.vars['latent']) self.load_cov(covariance[obs].loc[obs] if covariance is not None else cov(self.mx_data.T)) d_matrices = list() for i, effect in enumerate(self.effects): effect.load(i, self, data, **kwargs) d = getattr(self, f'mx_d{i + 1}') d_matrices.append(d) self.mxs_d = d_matrices self.mx_identity = np.identity(self.n_samples) ''' ----------------------------LINEAR ALGEBRA PART--------------------------- ----------------------The code below is responsible----------------------- ------------------for covariance structure computations------------------- ''' def calc_l(self, sigma=None, k=None): """ Calculate covariance across columns matrix T. Parameters ---------- sigma: np.ndarray, optional Sigma covariance matrix as returned by calc_sigma. Although there is no meaningful concept of Sigma matrix in ModelEffects, it is still computationally convenient to separate it into an extra element. If None, then it will computed automatically. The default is None. 
k: tuple, optional List of K matrices as returned by calc_k by Effects. If None, then calculated in place. The default is None. Returns ------- np.ndarray Covariance across columns (variables) matrix T. """ if sigma is None: sigma, _ = self.calc_sigma() if k is None: k = self.calc_ks() n = self.n_samples return sum(np.trace(k) * d for d, k in zip(self.mxs_d, k)) + n
<reponame>jlisee/xpkg<gh_stars>1-10 # Author: <NAME> <<EMAIL>> # Python Imports import json import os import tarfile from collections import defaultdict # Project Imports from xpkg import build from xpkg import linux from xpkg import util from xpkg import paths xpkg_root_var = 'XPKG_ROOT' xpkg_tree_var = 'XPKG_TREE' xpkg_repo_var = 'XPKG_REPO' xpkg_local_cache_var = 'XPKG_LOCAL_CACHE' def parse_dependency(value): """ Basic support for version expression. Right now it just parses mypackage==1.0.0 -> ('mypackage', '1.0.0') mypackage -> ('mypackage', None) """ # Split into parts parts = value.split('==') # We always have name name = parts[0] # Pull out the version, or report an error if len(parts) == 1: version = None elif len(parts) == 2: version = parts[1] else: raise Exception('Invalid package expression: "%s"' % value) return (name, version) class Exception(BaseException): pass class InstallDatabase(object): """ Manages the on disk database of packages. """ def __init__(self, env_dir): # Package db location self._db_dir = self.db_dir(env_dir) self._db_path = os.path.join(self._db_dir, 'db.yml') # Create package database if it doesn't exist if not os.path.exists(self._db_path): self._create_db() # Load database self._load_db() def _create_db(self): """ Create database """ # Create directory if not os.path.exists(self._db_dir): os.makedirs(self._db_dir) # Create empty db file if needed if not os.path.exists(self._db_path): with open(self._db_path, 'w') as f: f.write('') def _load_db(self): """ Load DB from disk. """ self._db = util.yaml_load(open(self._db_path)) # Handle the empty database case if self._db is None: self._db = {} # Build a list of directories and the counts of package that reference # them self._gen_dir_counts() def _save_db(self): """ Save DB to disk. 
""" with open(self._db_path, 'w') as f: util.yaml_dump(self._db, f) def _gen_dir_counts(self): """ Generates reference counts of directories, that can be used to see if a package is the last one using that directory. """ self._dirs = defaultdict(int) for data in self._db.itervalues(): for d in data['dirs']: self._dirs[d] += 1 def mark_installed(self, name, info): """ Marks the current package installed """ # Mark package with the current installed version self._db[name] = info # Save the data to disk self._save_db() def mark_removed(self, name): """ Marks the current package installed """ # Mark package with the current installed version del self._db[name] # Save the data to disk self._save_db() def iter_packages(self): """ Returns an iterator of (package, version) pairs """ for k in self._db.iteritems(): yield k def get_info(self, name): """ Return the information on the installed package, returns None if it doesn't exist. """ return self._db.get(name, None) def installed(self, name, version=None): """ Returns true if the given package is installed, supplying no version will return true if any version is installed. """ info = self.get_info(name) if info: if version: return version == info.get('version', None) else: return True else: return False def get_rdepends(self, name): """ Get all the packages which depend on this package """ rdepends = [] for pkg_name, info in self._db.iteritems(): deps = info.get('dependencies', []) for dep in deps: dep_name, version = parse_dependency(dep) if dep_name == name: rdepends.append(pkg_name) return rdepends def dir_references(self, d): """ Returns how many packages are using this directory. """ return self._dirs[d] @staticmethod def db_dir(root): """ Returns the db directory relative to the given root. """ return os.path.join(root, 'var', 'xpkg') class Settings(object): """ Settings for the current environment. 
TODO: add versioning to the on disk format """ def __init__(self, path): """ Create settings object with the stored settings from the given path. """ # Load the settings data if the file exists if os.path.exists(path): settings_data = util.yaml_load(open(path)) else: settings_data = None # Lookup data based on the presence of the configuration data if settings_data is None: toolset_dict = None self.name = 'none' else: toolset_dict = settings_data.get('toolset', None) self.name = settings_data.get('name', 'unknown') # Create toolset if possible otherwise get the default if toolset_dict is None: self.toolset = build.Toolset.lookup_by_name(build.DefaultToolsetName) else: self.toolset = build.Toolset.create_from_dict(toolset_dict) class Environment(object): """ This class manages the local package environment. """ SETTINGS_PATH = os.path.join('var', 'xpkg', 'env.yml') @staticmethod def init(env_dir, name, toolset_name=None): """ Initialize the environment in the given directory. """ # Bail out with an error if the environment already exists if Environment.env_exists(env_dir): raise Exception('Environment already exists in: %s' % env_dir) # Create the empty db file (this triggers database file creation) pdb = InstallDatabase(env_dir) # Make sure we have a valid ld.so symlink linux.update_ld_so_symlink(env_dir) # Lookup our toolset and translate to dict toolset = build.Toolset.lookup_by_name(toolset_name) # Create our settings dict and write it disk settings = { 'name' : name, 'toolset' : toolset.to_dict(), } # For path to our settings files, and save it settings_path = os.path.join(env_dir, Environment.SETTINGS_PATH) with open(settings_path, 'w') as f: util.yaml_dump(settings, f) def __init__(self, env_dir=None, create=False, tree_path=None, repo_path=None, verbose=False): """ env_dir - path to the environment dir create - create the environment if it does exist tree_path - URL for a XPD tree repo_path - URL for a XPA package archive verbose - print all build commands 
to screen """ if env_dir is None: if xpkg_root_var in os.environ: self._env_dir = os.environ[xpkg_root_var] else: raise Exception("No XPKG_ROOT not defined, can't find environment") else: self._env_dir = env_dir self.root = self._env_dir self.verbose = verbose # Error out if we are not creating and environment and this one does # not exist if not self.env_exists(self._env_dir) and not create: raise Exception('No Xpkg environment found in "%s"' % self._env_dir) # Create environment if needed if not self.env_exists(self._env_dir) and create: self.init(self._env_dir, 'default', build.DefaultToolsetName) # If needed this will setup the empty environment self._pdb = InstallDatabase(self._env_dir) # Load the settings settings = Settings(self.env_settings_path(self._env_dir)) self.name = settings.name self.toolset = settings.toolset def get_paths(base_path, env_var): """ Parse class argument and environment variables to get path. """ # Get the raw path from our given value, or the environment variable raw_path = None if base_path: raw_path = base_path elif env_var in os.environ: raw_path = os.environ[env_var] else: raw_path = None # Turn that raw path into a list if raw_path: paths = raw_path.split(':') else: paths = [] return paths # Setup the package tree to either load from the given path or return # no packages self.tree_paths = get_paths(tree_path, xpkg_tree_var) if len(self.tree_paths) == 1: self._tree = FilePackageTree(self.tree_paths[0]) elif len(self.tree_paths) > 0: trees = [FilePackageTree(t) for t in self.tree_paths] self._tree = CombinePackageSource(trees) else: self._tree = EmptyPackageSource() # Setup the package repository so we can install pre-compiled packages self.repo_paths = get_paths(repo_path, xpkg_repo_var) if len(self.repo_paths) == 1: self._repo = FilePackageRepo(self.repo_paths[0]) elif len(self.repo_paths) > 0: repos = [FilePackageRepo(t) for t in self.repo_paths] self._repo = CombinePackageSource(repos) else: self._repo = EmptyPackageSource() 
# Make sure the package cache is created self._xpa_cache_dir = self.xpa_cache_dir(self._env_dir) util.ensure_dir(self._xpa_cache_dir) def install(self, input_val): """ Installs the desired input this can be any of the following: path/to/description/package.xpd path/to/binary/package.xpa package package==version """ # Check to make sure the install is allowed self._install_check(input_val) # Install our input if input_val.endswith('.xpa'): # We have a binary package so install it self._install_xpa(input_val) elif input_val.endswith('.xpd'): # Path is an xpd file load that then install xpd = XPD(input_val) self._install_xpd(xpd) else: # The input_val is a package name so parse out the desired version # and name name, version = self._parse_install_input(input_val) # First try and find the xpa (pre-compiled) version of the package xpa = self._repo.lookup(name, version) if xpa: # Install the XPA self._install_xpa(xpa) else: # No binary package try, so lets try and find a description in # the package tree xpd_data = self._tree.lookup(name, version) if xpd_data is None: msg = "Cannot find description for package: %s" % input_val raise Exception(msg) # Install the XPD self._install_xpd(xpd_data) def build_xpd(self, xpd, dest_path, verbose=False): """ Builds the given package from it's package description (XPD) data. Returns the path to the package. """ # Determine if we are doing a verbose build verbose_build = verbose or self.verbose # Make sure all dependencies are properly installed self._install_deps(xpd, build=True) # Build the package and return the path builder = build.BinaryPackageBuilder(xpd) res = builder.build(dest_path, environment=self, output_to_file=not verbose_build) return res def _install_xpd(self, xpd, build_into_env=False): """ Builds package and directly installs it into the given environment. xpd - an XPD describing the package to install. 
""" # Make sure all dependencies are properly installed self._install_deps(xpd) if not build_into_env: # Build the package as XPD and place it into our cache
<reponame>luqizheng/rtmplite #!/usr/bin/env python # (c) 2011, <NAME> <<EMAIL>>. No rights reserved. # Experimental rendezvous server for RTMFP in pure Python. # # This is a re-write of OpenRTMFP's Cumulus project from C++ to Python to fit with rtmplite project architecture. # The original Cumulus project is in C++ and allows rendezvous and man-in-middle mode at the server. # You can download the original project from https://github.com/OpenRTMFP/Cumulus, and compile and start on Mac OS X as follows: # $ cd CumulusLib; make -f Makefile-darwin clean; make -f Makefile-darwin # $ cd ../CumulusService; make -f Makefile-darwin # $ ./CumulusService -l8 -d all # # To get started with this rtmfp.py experiments, start it first in debug mode. # $ export PYTHONPATH=.:/path/to/p2p-sip/src:/path/to/PyCrypto # $ ./rtmfp.py -d --no-rtmp # Then compile the test Flash application by editing testP2P/Makefile to supply the path for your mxmlc. # $ cd testP2P; make; cd .. # Alternatively use the supplied testP2P.swf. # Launch your web browser to open the file testP2P/bin-debug/testP2P.swf # # For p2p-mode: first click on publisher connect and then on player connect to see the video stream going. # # For man-in-middle mode: start rtmfp.py with --middle argument. # $ ./rtmfp.py -d --no-rtmp --middle # Then click on publisher connect, copy the replacement peer id from the console of rtmfp.py and paste to the # nearID/farID box in the browser, and finally click on the player connect button to see the video stream # flowing via your server. # # For server connection mode instead of the direct (p2p) mode, start rtmfp.py without --middle argument, and # then before clicking on publisher connect, uncheck the "direct" checkbox to enable FMS_CONNECTION mode in NetStream # instead of DIRECT_CONNECTION. # # TODO: the server connection mode is not implemented yet. # TODO: the interoperability with SIP is not implemented yet. # TODO: the NAT traversal is not tested yet. 
''' This is a simple RTMFP rendezvous server to enable end-to-end and client-server UDP
based media transport between Flash Player instances and with this server.

Protocol Description
--------------------

(The description is based on the original OpenRTMFP's Cumulus project as well as
http://www.ietf.org/proceedings/10mar/slides/tsvarea-1.pdf)

Session

An RTMFP session is an end-to-end bi-directional pipe between two UDP transport
addresses. A transport address contains an IP address and port number, e.g.,
"172.16.17.32:1935". A session can have one or more flows where a flow is a logical
path from one entity to another via zero or more intermediate entities. UDP packets
containing encrypted RTMFP data are exchanged in a session. A packet contains one or
more messages. A packet is always encrypted using AES with 128-bit keys.

In the protocol description below, all numbers are in network byte order (big-endian).
The | operator indicates concatenation of data. The numbers are assumed to be unsigned
unless mentioned explicitly.

Scrambled Session ID

The packet format is as follows. Each packet has the first 32 bits of scrambled
session-id followed by encrypted part. The scrambled (instead of raw) session-id makes
it difficult if not impossible to mangle packets by middle boxes such as NATs and
layer-4 packet inspectors. The bit-wise XOR operator is used to scramble the first
32-bit number with subsequent two 32-bit numbers. The XOR operator makes it possible
to easily unscramble.

  packet := scrambled-session-id | encrypted-part

To scramble a session-id, scrambled-session-id = a^b^c where ^ is the bit-wise XOR
operator, a is the session-id, and b and c are the two 32-bit numbers from the first
8 bytes of the encrypted-part. To unscramble, session-id = z^b^c where z is the
scrambled-session-id, and b and c are again the two 32-bit numbers from the first
8 bytes of the encrypted-part. The session-id determines which session keys are used
for encryption and decryption of the encrypted part.
There is one exception for the fourth message in the handshake which contains the non-zero session-id but the handshake (symmetric) session keys are used for encryption/decryption. For the handshake messages, a symmetric AES (advanced encryption standard) with 128-bit (16 bytes) key of "Adobe Systems 02" (without quotes) is used. For subsequent in-session messages the established asymmetric session keys are used as described later. Encryption Assuming that the AES keys are known, the encryption and decryption of the encrypted-part is done as follows. For decryption, an initialization vector of all zeros (0's) is used for every decryption operation. For encryption, the raw-part is assumed to be padded as described later, and an initialization vector of all zeros (0's) is used for every encryption operation. The decryption operation does not add additional padding, and the byte-size of the encrypted-part and the raw-part must be same. The decrypted raw-part format is as follows. It starts with a 16-bit checksum, followed by variable bytes of network-layer data, followed by padding. The network-layer data ignores the padding for convenience. raw-part := checksum | network-layer-data | padding The padding is a sequence of zero or more bytes where each byte is \xff. Since it uses 128-bit (16 bytes) key, padding ensures that the size in bytes of the decrypted part is a multiple of 16. Thus, the size of padding is always less than 16 bytes and is calculated as follows: len(padding) = 16*N - len(network-layer-data) - 1 where N is any positive number to make 0 <= padding-size < 16 For example, if network-layer-data is 84 bytes, then padding is 16*6-84-1=11 bytes. Adding a padding of 11 bytes makes the decrypted raw-part of size 96 which is a multiple of 16 (bytes) hence works with AES with 128-bit key. Checksum The checksum is calculated over the concatenation of network-layer-data and padding. 
Thus for the encoding direction you should apply the padding followed by checksum calculation and then AES encrypt, and for the decoding direction you should AES decrypt, verify checksum and then remove the (optional) padding if needed. Usually padding removal is not needed because network-layer data decoders will ignore the remaining data anyway. The 16-bit checksum number is calculated as follows. The concatenation of network-layer-data and padding is treated as a sequence of 16-bit numbers. If the size in bytes is not an even number, i.e., not divisible by 2, then the last 16-bit number used in the checksum calculation has that last byte in the least-significant position (weird!). All the 16-bit numbers are added in to a 32-bit number. The first 16-bit and last 16-bit numbers are again added, and the resulting number's first 16 bits are added to itself. Only the least-significant 16 bit part of the resulting sum is used as the checksum. Network Layer Data The network-layer data contains flags, optional timestamp, optional timestamp echo and one or more chunks. network-layer-data = flags | timestamp | timestamp-echo | chunks ... The flags value is a single byte containing these information: time-critical forward notification, time-critical reverse notification, whether timestamp is present? whether timestamp echo is present and initiator/responder marker. The initiator/responder marker is useful if the symmetric (handshake) session keys are used for AES, so that it protects against packet loopback to sender. The bit format of the flags is not clear, but the following applies. For the handshake messages, the flags is \x0b. When the flags' least-significant 4-bits are 1101b then the timestamp-echo is present. The timestamp seems to be always present. For in-session messages, the last 4-bits are either 1101b or 1001b. 
-------------------------------------------------------------------- flags meaning -------------------------------------------------------------------- 0000 1011 setup/handshake 0100 1010 in-session no timestamp-echo (server to Flash Player) 0100 1110 in-session with timestamp-echo (server to Flash Player) xxxx 1001 in-session no timestamp-echo (Flash Player to server) xxxx 1101 in-session with timestamp-echo (Flash Player to server) -------------------------------------------------------------------- TODO: looks like bit \x04 indicates whether timestamp-echo is present. Probably \x80 indicates whether timestamp is present. last two bits of 11b indicates handshake, 10b indicates server to client and 01b indicates client to server. The timestamp is a 16-bit number that represents the time with 4 millisecond clock. The wall clock time can be used for generation of this timestamp value. For example if the current time in seconds is tm = 1319571285.9947701 then timestamp is calculated as follows: int(time * 1000/4) & 0xffff = 46586 , i.e., assuming 4-millisecond clock, calculate the clock units and use the least significant 16-bits. The timestamp-echo is just the timestamp value that was received in the incoming request
<gh_stars>10-100 import warnings from django.conf import settings from django.core.files.base import ContentFile import six from smartfields.fields import ImageFieldFile from smartfields.processors.base import BaseFileProcessor from smartfields.utils import ProcessingError from smartfields.processors.mixin import CloudExternalFileProcessorMixin try: from PIL import Image except ImportError: Image = None try: from wand.image import Image as WandImage except ImportError: WandImage = None __all__ = [ 'ImageProcessor', 'ImageFormat', 'supported_formats', 'WandImageProcessor', 'CloudImageProcessor' ] PILLOW_MODES = [ '1', # (1-bit pixels, black and white, stored with one pixel per byte) 'L', # (8-bit pixels, black and white) 'LA', # greyscale with alpha 'P', # (8-bit pixels, mapped to any other mode using a color palette) 'RGB', # (3x8-bit pixels, true color) 'RGBA', # (4x8-bit pixels, true color with transparency mask) 'CMYK', # (4x8-bit pixels, color separation) 'YCbCr', # (3x8-bit pixels, color video format) 'LAB', # (3x8-bit pixels, the L*a*b color space) 'HSV', # (3x8-bit pixels, Hue, Saturation, Value color space) 'I', # (32-bit signed integer pixels) 'F', # (32-bit floating point pixels) ] PILLOW_IMAGE_SUPPORT = { 'BMP': ( ['bmp', 'dib'], ['RGB', 'P', 'L', '1'], ['RGB', 'P', 'L', '1']), 'EPS': ( ['eps', 'ps'], ['RGB', 'LAB', 'L'], ['CMYK', 'RGB', 'L']), # - No read support 'GIF': ( ['gif'], ['P', 'L'], ['P', 'L', '1']), 'IM': ( ['im'], ['YCbCr', 'CMYK', 'RGBA', 'RGB', 'P', 'LA', 'L', '1'], ['F', 'I', 'YCbCr', 'CMYK', 'RGBA', 'RGB', 'P', 'LA', 'L', '1']), 'JPEG': ( ['jpg', 'jpe', 'jpeg', 'jfif'], ['CMYK', 'RGB', 'L'], ['CMYK', 'RGB', 'L']), 'JPEG2000': ( ['jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c'], ['RGBA', 'RGB', 'LA', 'L'], ['RGBA', 'RGB', 'LA', 'L']), 'MSP': ( ['msp'], ['1'], ['1']), 'PCX': (['pcx'], ['RGB', 'P', 'L', '1'], ['RGB', 'P', 'L', '1']), 'PNG': (['png'], ['RGBA', 'RGB', 'P', 'L', '1'], ['RGBA', 'RGB', 'P', 'L', '1']), 'PPM': (['ppm', 'pgm', 'pbm'], 
['RGB', 'L', '1'], ['RGB', 'L', '1']), 'SPIDER': (['spi'], ['F;32F'], ['F;32F']), 'TIFF': (['tif', 'tiff'], ['F', 'I', 'YCbCr', 'CMYK', 'RGBA', 'RGB', 'P', 'LA', 'L', '1'], ['F', 'I', 'LAB', 'YCbCr', 'CMYK', 'RGBA', 'RGB', 'P', 'LA', 'L', '1']), 'WEBP': (['webp'], ['RGBA', 'RGB'], ['RGBA', 'RGB']), 'XBM': (['xbm'], ['1'], ['1']), 'DCX': (['dcx'], ['1', 'L', 'P', 'RGB'], None), # - Intel fax format # PCD format segfaults: https://github.com/python-pillow/Pillow/issues/568 # 'PCD': (['pcd'], ['RGB'], None), 'PDF': (['pdf'], None, ['1', 'RGB']), 'PSD': (['psd'], ['P'], None), 'XPM': (['xpm'], ['P'], None), 'SGI': (['sgi'], ['L', 'RGB'], None), 'TGA': (['tga', 'tpic'], ['RGB', 'RGBA'], None) } def _round(val): # emulate python3 way of rounding toward the even choice new_val = int(round(val)) if abs(val - new_val) == 0.5 and new_val % 2 == 1: return new_val - 1 return new_val class ImageFormat(object): def __init__(self, format, mode=None, ext=None, save_kwargs=None): self.format = format self.mode = mode self.ext = ext self.exts, self.input_modes, self.output_modes = PILLOW_IMAGE_SUPPORT[format] assert mode is None or (self.can_write and mode in self.output_modes), \ "Pillow cannot write \"%s\" in this mode: \"%s\"" % (self.format, mode) self.save_kwargs = save_kwargs or {} def __str__(self): return self.format def __eq__(self, other): return str(self) == str(other) @property def can_read(self): return self.input_modes is not None @property def can_write(self): return self.output_modes is not None def get_ext(self): if self.ext is not None: return self.ext return self.exts[0] def get_exts(self): """Returns a string of comma separated known extensions for this format""" return ','.join(self.exts) def get_mode(self, old_mode=None): """Returns output mode. If `mode` not set it will try to guess best mode, or next best mode comparing to old mode """ if self.mode is not None: return self.mode assert self.can_write, "This format does not have a supported output mode." 
if old_mode is None: return self.output_modes[0] if old_mode in self.output_modes: return old_mode # now let's get best mode available from supported try: idx = PILLOW_MODES.index(old_mode) except ValueError: # maybe some unknown or uncommon mode return self.output_modes[0] for mode in PILLOW_MODES[idx+1:]: if mode in self.output_modes: return mode # since there is no better one, lets' look for closest one in opposite direction opposite = PILLOW_MODES[:idx] opposite.reverse() for mode in opposite: if mode in self.output_modes: return mode class ImageFormats(dict): def __init__(self, formats): super(ImageFormats, self).__init__([(f, ImageFormat(f)) for f in formats]) @property def input_exts(self): return ','.join([f.get_exts() for _, f in six.iteritems(self) if f.can_read]) supported_formats = ImageFormats(getattr(settings, 'SMARTFIELDS_IMAGE_FORMATS', [ 'PCX', 'XPM', 'TIFF', 'JPEG', 'XBM', 'GIF', 'IM', 'PSD', 'PPM', 'SGI', 'BMP', 'TGA', 'PNG', # 'DCX', 'EPS', 'PCD', 'PDF' - not useful or buggy formats ])) class ImageProcessor(BaseFileProcessor): field_file_class = ImageFieldFile supported_formats = supported_formats @property def resample(self): # resampling was renamed from Image.ANTIALIAS to Image.LANCZOS return getattr(Image, 'LANCZOS', getattr(Image, 'ANTIALIAS')) def get_params(self, **kwargs): params = super(ImageProcessor, self).get_params(**kwargs) if 'format' in params: format = params['format'] if not isinstance(format, ImageFormat): format = ImageFormat(format) assert format.can_write, \ "This format: \"%s\" is not supported for output." 
% format params['format'] = format return params def check_params(self, **kwargs): params = self.get_params(**kwargs) scale = params.get('scale', None) if scale is not None: self._check_scale_params(**scale) def get_ext(self, **kwargs): try: format = self.get_params(**kwargs)['format'] ext = format.get_ext() if ext: return ".%s" % ext elif ext is not None: return ext except KeyError: pass def _check_scale_params(self, width=None, height=None, min_width=None, min_height=None, max_width=None, max_height=None, preserve=True): assert width is None or (min_width is None and max_width is None), \ "min_width or max_width don't make sence if width cannot be changed" assert height is None or (min_height is None and max_height is None), \ "min_height or max_height don't make sence if height cannot be changed" assert min_width is None or max_width is None or min_width < max_width, \ "min_width should be smaller than max_width" assert min_height is None or max_height is None or min_height < max_height, \ "min_height should be smaller than max_height" if preserve: assert width is None or height is None, \ "cannot preserve ratio when both width and height are set" assert width is None or (min_height is None and max_height is None), \ "cannot preserve ratio when width is set and there are restriction on height" assert height is None or (min_width is None and max_width is None), \ "cannot preserve ratio when height is set and there are restriction on width" assert min_width is None or max_height is None assert max_width is None or min_height is None def get_dimensions(self, old_width, old_height, width=None, height=None, min_width=None, min_height=None, max_width=None, max_height=None, preserve=True): self._check_scale_params( width, height, min_width, min_height, max_width, max_height, preserve) ratio = float(old_width)/old_height new_width, new_height = old_width, old_height if width is not None: new_width = width if preserve: new_height = _round(new_width/ratio) if height is 
not None: new_height = height if preserve: new_width = _round(new_height*ratio) if min_width and min_width > new_width: new_width = min_width if preserve: new_height = _round(new_width/ratio) if min_height and min_height > new_height: new_height = min_height if preserve: new_width = _round(new_height*ratio) if max_width and max_width < new_width: new_width = max_width if preserve: new_height = _round(new_width/ratio) if max_height and max_height < new_height: new_height = max_height if preserve: new_width = _round(new_height*ratio) return new_width, new_height def resize(self, image, scale=None, **kwargs): if scale is not None: new_size = self.get_dimensions(*image.size, **scale) if image.size != new_size: return image.resize(new_size, resample=self.resample) return image def convert(self, image, format=None, **kwargs): if format is None: return None new_mode = format.get_mode(old_mode=image.mode) if new_mode != image.mode: if new_mode == 'P': # TODO: expiremental, need some serious testing palette_size = 256 if image.palette: palette_size = len(image.palette.getdata()[1]) // 3 image = image.convert( new_mode, palette=Image.ADAPTIVE, colors=palette_size) else: image = image.convert(new_mode) if format != image.format: stream_out = six.BytesIO() image.save(stream_out, format=str(format), **format.save_kwargs) return stream_out def get_image(self, stream, **kwargs): with warnings.catch_warnings(): if not settings.DEBUG: warnings.simplefilter("error", Image.DecompressionBombWarning) image = Image.open(stream) return image def process(self, value, scale=None, format=None, **kwargs): cur_pos = value.tell() value.seek(0) stream = six.BytesIO(value.read()) stream_out = None value.seek(cur_pos) try: image = self.get_image(stream, scale=scale, format=format, **kwargs) image = self.resize(image, scale=scale, format=format, **kwargs) stream_out = self.convert(image, scale=scale, format=format, **kwargs) if stream_out is not None: content = stream_out.getvalue() else: content 
= stream.getvalue()  # continuation: no conversion happened, keep the original bytes
        except (IOError, OSError, Image.DecompressionBombWarning) as e:
            # Pillow signals broken/unsupported images with IOError/OSError; the
            # decompression-bomb warning is promoted to an error in get_image()
            # (when settings.DEBUG is false). Normalize all of them into the
            # processor's ProcessingError so callers handle a single type.
            raise ProcessingError(
                "There was a problem with image conversion: %s" % e)
        finally:
            # Close both in-memory streams whether or not processing succeeded.
            if stream_out is not None:
                stream_out.close()
            stream.close()
        return ContentFile(content)


class WandImageProcessor(ImageProcessor):
    """ImageProcessor variant backed by Wand (ImageMagick bindings) instead of
    Pillow. Only resizing and serialization differ; dimension math, parameter
    checking etc. are inherited unchanged.
    """

    def resize(self, image, scale=None, **kwargs):
        """Resize `image` according to the `scale` spec (see get_dimensions).

        Unlike the Pillow implementation, Wand's resize() mutates the image
        in place, so the (possibly modified) same object is returned.
        """
        if scale is not None:
            new_size = self.get_dimensions(*image.size, **scale)
            if image.size != new_size:
                image.resize(*new_size)
        return image

    def convert(self, image, format=None, **kwargs):
        """Serialize `image`, converting it to `format` first when given.

        Returns an in-memory stream holding the encoded image. NOTE(review):
        unlike the Pillow implementation, this returns a stream even when
        `format` is None (it never returns None).
        """
        if format is not None:
            # Setting the format attribute makes Wand re-encode on save.
            image.format = str(format)
        stream_out = six.BytesIO()
        image.save(file=stream_out)
        return stream_out
- now)  # continuation of calc_yield_wait() started before this chunk
    if task_details.io_timeout:
      out = min(out, last_io + task_details.io_timeout - now)
  out = max(out, 0)
  logging.debug('calc_yield_wait() = %d', out)
  return out


def kill_and_wait(proc, grace_period, reason):
  """Stops a process, escalating SIGTERM to SIGKILL after grace_period.

  Arguments:
    proc: subprocess42.Popen instance to stop.
    grace_period: seconds to wait after SIGTERM before sending SIGKILL.
    reason: free-form text, used only for logging.

  Returns:
    The process exit code.
  """
  logging.warning('SIGTERM finally due to %s', reason)
  proc.terminate()
  try:
    proc.wait(grace_period)
  except subprocess42.TimeoutError:
    # The child ignored SIGTERM within the grace period; force it down.
    logging.warning('SIGKILL finally due to %s', reason)
    proc.kill()
  exit_code = proc.wait()
  logging.info('Waiting for process exit in finally - done')
  return exit_code


def fail_without_command(remote, bot_id, task_id, params, cost_usd_hour,
                         task_start, exit_code, stdout):
  """Reports a task failure that happened before the command could start.

  Posts one final task update (cost and duration so far, both timeout flags
  False) and returns a metadata dict in the same shape run_command() returns.
  """
  now = monotonic_time()
  params['cost_usd'] = cost_usd_hour * (now - task_start) / 60. / 60.
  params['duration'] = now - task_start
  params['io_timeout'] = False
  params['hard_timeout'] = False
  # Ignore server reply to stop.
  remote.post_task_update(task_id, bot_id, params, (stdout, 0), 1)
  return {
      u'exit_code': exit_code,
      u'hard_timeout': False,
      u'io_timeout': False,
      u'must_signal_internal_failure': None,
      u'version': OUT_VERSION,
  }


def run_command(remote, task_details, work_dir, cost_usd_hour, task_start,
                run_isolated_flags, bot_file):
  """Runs a command and sends packets to the server to stream results back.

  Implements both I/O and hard timeouts. Sends the packets numbered, so the
  server can ensure they are processed in order.

  Returns:
    Metadata dict with the execution result.

  Raises:
    ExitSignal if caught some signal when starting or stopping.
    InternalError on unexpected internal errors.
  """
  # TODO(maruel): This function is incomprehensible, split and refactor.

  # Signal the command is about to be started. It is important to post a task
  # update *BEFORE* starting any user code to signify the server that the bot
  # correctly started processing the task. In the case of non-idempotent task,
  # this signal is used to know if it is safe to retry the task or not. See
  # _reap_task() in task_scheduler.py for more information.
last_packet = start = now = monotonic_time() task_id = task_details.task_id bot_id = task_details.bot_id params = { 'cost_usd': cost_usd_hour * (now - task_start) / 60. / 60., } if not remote.post_task_update(task_id, bot_id, params): # Don't even bother, the task was already canceled. return { u'exit_code': -1, u'hard_timeout': False, u'io_timeout': False, u'must_signal_internal_failure': None, u'version': OUT_VERSION, } isolated_result = os.path.join(work_dir, 'isolated_result.json') args_path = os.path.join(work_dir, 'run_isolated_args.json') cmd = get_run_isolated() cmd.extend(['-a', args_path]) args = get_isolated_args(remote.is_grpc(), work_dir, task_details, isolated_result, bot_file, run_isolated_flags) # Hard timeout enforcement is deferred to run_isolated. Grace is doubled to # give one 'grace_period' slot to the child process and one slot to upload # the results back. task_details.hard_timeout = 0 if task_details.grace_period: task_details.grace_period *= 2 try: # TODO(maruel): Support both channels independently and display stderr in # red. env = os.environ.copy() for key, value in (task_details.env or {}).iteritems(): if not value: env.pop(key, None) else: env[key] = value logging.info('cmd=%s', cmd) logging.info('cwd=%s', work_dir) logging.info('env=%s', env) fail_on_start = lambda exit_code, stdout: fail_without_command( remote, bot_id, task_id, params, cost_usd_hour, task_start, exit_code, stdout) # We write args to a file since there may be more of them than the OS # can handle. 
try: with open(args_path, 'wb') as f: json.dump(args, f) except (IOError, OSError) as e: return fail_on_start( -1, 'Could not write args to %s: %s' % (args_path, e)) # Start the command try: assert cmd and all(isinstance(a, basestring) for a in cmd) proc = subprocess42.Popen( cmd, env=env, cwd=work_dir, detached=True, stdout=subprocess42.PIPE, stderr=subprocess42.STDOUT, stdin=subprocess42.PIPE) except OSError as e: return fail_on_start( 1, 'Command "%s" failed to start.\nError: %s' % (' '.join(cmd), e)) # Monitor the task output_chunk_start = 0 stdout = '' exit_code = None had_io_timeout = False must_signal_internal_failure = None kill_sent = False timed_out = None try: calc = lambda: calc_yield_wait( task_details, start, last_io, timed_out, stdout) maxsize = lambda: MAX_CHUNK_SIZE - len(stdout) last_io = monotonic_time() for _, new_data in proc.yield_any(maxsize=maxsize, timeout=calc): now = monotonic_time() if new_data: stdout += new_data last_io = now # Post update if necessary. if should_post_update(stdout, now, last_packet): last_packet = monotonic_time() params['cost_usd'] = ( cost_usd_hour * (last_packet - task_start) / 60. / 60.) if not remote.post_task_update( task_id, bot_id, params, (stdout, output_chunk_start)): # Server is telling us to stop. Normally task cancellation. if not kill_sent: logging.warning('Server induced stop; sending SIGKILL') proc.kill() kill_sent = True output_chunk_start += len(stdout) stdout = '' # Send signal on timeout if necessary. Both are failures, not # internal_failures. # Eventually kill but return 0 so bot_main.py doesn't cancel the task. if not timed_out: if (task_details.io_timeout and now - last_io > task_details.io_timeout): had_io_timeout = True logging.warning( 'I/O timeout is %.3fs; no update for %.3fs sending SIGTERM', task_details.io_timeout, now - last_io) proc.terminate() timed_out = monotonic_time() else: # During grace period. 
if not kill_sent and now - timed_out >= task_details.grace_period: # Now kill for real. The user can distinguish between the following # states: # - signal but process exited within grace period, # (hard_|io_)_timed_out will be set but the process exit code will # be script provided. # - processed exited late, exit code will be -9 on posix. logging.warning( 'Grace of %.3fs exhausted at %.3fs; sending SIGKILL', task_details.grace_period, now - timed_out) proc.kill() kill_sent = True logging.info('Waiting for process exit') exit_code = proc.wait() except ( ExitSignal, InternalError, IOError, OSError, remote_client.InternalError) as e: # Something wrong happened, try to kill the child process. must_signal_internal_failure = str(e.message or 'unknown error') exit_code = kill_and_wait(proc, task_details.grace_period, e.message) # This is the very last packet for this command. It if was an isolated task, # include the output reference to the archived .isolated file. now = monotonic_time() params['cost_usd'] = cost_usd_hour * (now - task_start) / 60. / 60. params['duration'] = now - start params['io_timeout'] = had_io_timeout had_hard_timeout = False try: if not os.path.isfile(isolated_result): # It's possible if # - run_isolated.py did not start # - run_isolated.py started, but arguments were invalid # - host in a situation unable to fork # - grand child process outliving the child process deleting everything # it can # Do not create an internal error, just send back the (partial) # view as task_runner saw it, for example the real exit_code is # unknown. logging.warning('there\'s no result file') if exit_code is None: exit_code = -1 else: # See run_isolated.py for the format. with open(isolated_result, 'rb') as f: run_isolated_result = json.load(f) logging.debug('run_isolated:\n%s', run_isolated_result) # TODO(maruel): Grab statistics (cache hit rate, data downloaded, # mapping time, etc) from run_isolated and push them to the server. 
if run_isolated_result['outputs_ref']: params['outputs_ref'] = run_isolated_result['outputs_ref'] had_hard_timeout = run_isolated_result['had_hard_timeout'] if not had_io_timeout and not had_hard_timeout: if run_isolated_result['internal_failure']: must_signal_internal_failure = ( run_isolated_result['internal_failure']) logging.error('%s', must_signal_internal_failure) elif exit_code: # TODO(maruel): Grab stdout from run_isolated. must_signal_internal_failure = ( 'run_isolated internal failure %d' % exit_code) logging.error('%s', must_signal_internal_failure) exit_code = run_isolated_result['exit_code'] params['bot_overhead'] = 0. if run_isolated_result.get('duration') is not None: # Calculate the real task duration as measured by run_isolated and # calculate the remaining overhead. params['bot_overhead'] = params['duration'] params['duration'] = run_isolated_result['duration'] params['bot_overhead'] -= params['duration'] params['bot_overhead'] -= run_isolated_result.get( 'download', {}).get('duration', 0) params['bot_overhead'] -= run_isolated_result.get( 'upload', {}).get('duration', 0) params['bot_overhead'] -= run_isolated_result.get( 'cipd', {}).get('duration', 0) if params['bot_overhead'] < 0: params['bot_overhead'] = 0 isolated_stats = run_isolated_result.get('stats', {}).get('isolated') if isolated_stats: params['isolated_stats'] = isolated_stats cipd_stats = run_isolated_result.get('stats', {}).get('cipd') if cipd_stats: params['cipd_stats'] = cipd_stats cipd_pins = run_isolated_result.get('cipd_pins') if cipd_pins: params['cipd_pins'] = cipd_pins except (IOError, OSError, ValueError) as e: logging.error('Swallowing error: %s', e) if not must_signal_internal_failure: must_signal_internal_failure = '%s\n%s' % ( e, traceback.format_exc()[-2048:]) # TODO(maruel): Send the internal failure here instead of sending it through # bot_main, this causes a race condition. 
if exit_code is None: exit_code = -1 params['hard_timeout'] = had_hard_timeout # Ignore server reply to stop. Also ignore internal errors here if we are # already handling some. try: remote.post_task_update( task_id, bot_id, params, (stdout, output_chunk_start), exit_code) except remote_client.InternalError as e: logging.error('Internal error while finishing the task: %s', e) if not must_signal_internal_failure: must_signal_internal_failure = str(e.message or 'unknown error') return { u'exit_code': exit_code, u'hard_timeout': had_hard_timeout, u'io_timeout': had_io_timeout, u'must_signal_internal_failure': must_signal_internal_failure, u'version': OUT_VERSION, } finally: file_path.try_remove(unicode(isolated_result)) def main(args): subprocess42.inhibit_os_error_reporting() parser = optparse.OptionParser(description=sys.modules[__name__].__doc__) parser.add_option('--in-file', help='Name of the request file') parser.add_option( '--out-file', help='Name of the JSON file to write a task summary to') parser.add_option( '--swarming-server', help='Swarming server to send data back') parser.add_option( '--is-grpc', action='store_true', help='Communicate to Swarming via gRPC') parser.add_option( '--cost-usd-hour', type='float', help='Cost of this VM in $/h') parser.add_option('--start', type='float', help='Time this task was started') parser.add_option( '--bot-file', help='Path to a file describing the state of the host.')
'Ölmezbey':'', 'Özbek':'', 'Özben':'', 'Özberk':'', 'Özbey':'', 'Özbil':'', 'Özbilek':'', 'Özbilen':'', 'Özbilge':'', 'Özbilgin':'', 'Özbilir':'', 'Özbir':'', 'Özcebe':'', 'Pembe':'', 'Pembegül':'', 'Rebi':'', 'Rebii':'', 'Rebiyye':'', 'Rehber':'', 'Sebih':'', 'Sebil':'', 'Sebile':'', 'Seblâ':'', 'Sebu':'', 'Sebük':'', 'Sebüktekin':'', 'Serbülent':'', 'Sibel':'', 'Simber':'', 'Soylubey':'', 'Subegi':'', 'Subhi':'', 'Subhiye':'', 'Sulbiye':'', 'Sülünbike':'', 'Sümbül':'', 'Sümbülveş':'', 'Sünbüle':'', 'Şebnem':'', 'Şebnur':'', 'Şekibe':'', 'Şerbet':'', 'Şirinbegim':'', 'Şirinbige':'', 'Teber':'', 'Teberhun':'', 'Teberrük':'', 'Tebessüm':'', 'Tebrik':'', 'Tekbek':'', 'Tekbey':'', 'Tekbir':'', 'Tekebey':'', 'Tellibey':'', 'Tibet':'', 'Tilbe':'', 'Tolunbike':'', 'Tosunbey':'', 'Tunçbilek':'', 'Tunçbörü':'', 'Tüblek':'', 'Ubeyde':'', 'Ubeyt':'', 'Uçbeyi':'', 'Uğurlubey':'', 'Ulubek':'', 'Uluberk':'', 'Ulubey':'', 'Uluğbey':'', 'Umurbey':'', 'Urbeyi':'', 'Usberk':'', 'Usbey':'', 'Usunbike':'', 'Übeyd':'', 'Übeyde':'', 'Übeyt':'', 'Übük':'', 'Ünübol':'', 'Vecibe':'', 'Vehbi':'', 'Vehbiye':'', 'Yolbul':'', 'Zebercet':'', 'Zobu':'', 'Zorbey':'', 'Zübeyde':'', 'Zübeyr':'', 'Cedide':'', 'Celâdet':'', 'Celâl':'', 'Celâlettin':'', 'Celâli':'', 'Celâsun':'', 'Celâyir':'', 'Celil':'', 'Celile':'', 'Cem':'', 'Cemi':'', 'Cemil':'', 'Cemile':'', 'Ceminur':'', 'Cemre':'', 'Cemşir':'', 'Cemşit':'', 'Cengâver':'', 'Cenger':'', 'Cengiz':'', 'Cenk':'', 'Cenker':'', 'Cennet':'', 'Ceren':'', 'Cerit':'', 'Cesim':'', 'Cesur':'', 'Cevdet':'', 'Cevher':'', 'Cevheri':'', 'Cevri':'', 'Cevriye':'', 'Ceyhun':'', 'Ceylân':'', 'Cezlân':'', 'Cezmi':'', 'Cilvekâr':'', 'Cimşit':'', 'Cindoruk':'', 'Coşku':'', 'Coşkun':'', 'Coşkuner':'', 'Coşkunsu':'', 'Cömert':'', 'Cuci':'', 'Cudi':'', 'Cudiye':'', 'Culduz':'', 'Cumhur':'', 'Cündi':'', 'Cüneyt':'', 'Delice':'', 'Dicle':'', 'Domurcuk':'', 'Ece':'', 'Ecegül':'', 'Ecemiş':'', 'Ecenur':'', 'Ecer':'', 'Ecevit':'', 'Ecir':'', 'Ecmel':'', 'Ecvet':'', 
'Ekinci':'', 'Emcet':'', 'Erce':'', 'Erciyes':'', 'Ercüment':'', 'Erincek':'', 'Erincik':'', 'Evcil':'', 'Evcimen':'', 'Evecen':'', 'Fecir':'', 'Fecri':'', 'Fecriye':'', 'Gelincik':'', 'Gence':'', 'Gencel':'', 'Gencer':'', 'Genco':'', 'Gonce':'', 'Göcek':'', 'Gökcen':'', 'Gücel':'', 'Gücer':'', 'Gücümen':'', 'Gülce':'', 'Gülece':'', 'Gülinci':'', 'Güvercin':'', 'Güzelce':'', 'Hicret':'', 'Huceste':'', 'Hüccet':'', 'Hüceste':'', 'İclâl':'', 'İmece':'', 'İnce':'', 'İncesu':'', 'İnci':'', 'İnciden':'', 'İncifem':'', 'İncifer':'', 'İncigül':'', 'İncilâ':'', 'İncilây':'', 'İncinur':'', 'İncisel':'', 'İnciser':'', 'İvecen':'', 'İyicil':'', 'Kıvılcım':'', 'Korucu':'', 'Mecdi':'', 'Mecdut':'', 'Mecide':'', 'Mecit':'', 'Mecittin':'', 'Mecnun':'', 'Mehcur':'', 'Mehcure':'', 'Mengücek':'', 'Mescur':'', 'Mevcude':'', 'Mucide':'', 'Mucip':'', 'Mucit':'', 'Mucize':'', 'Müceddet':'', 'Mücellâ':'', 'Mücessem':'', 'Mücevher':'', 'Münci':'', 'Münciye':'', 'Necdet':'', 'Necile':'', 'Necip':'', 'Neclâ':'', 'Necmi':'', 'Necmiye':'', 'Necve':'', 'Netice':'', 'Öncel':'', 'Öncü':'', 'Öncüer':'', 'Özgeci':'', 'Recep':'', 'Selcen':'', 'Sencer':'', 'Tecelli':'', 'Tecen':'', 'Tecer':'', 'Teceren':'', 'Tecim':'', 'Tecimen':'', 'Tecimer':'', 'Tecir':'', 'Ticen':'', 'Tomurcuk':'', 'Tuğcu':'', 'Tuncel':'', 'Tuncer':'', 'Ülkücü':'', 'Vecdet':'', 'Vecdi':'', 'Vechi':'', 'Vechiye':'', 'Vecih':'', 'Vecihe':'', 'Vecihi':'', 'Vecit':'', 'Yüce':'', 'Yüceer':'', 'Yücel':'', 'Yücelen':'', 'Yücelt':'', 'Yücelten':'', 'Yücenur':'', 'Yücesoy':'', 'Yücetekin':'', 'Yücetürk':'', 'Çeçen':'', 'Çekik':'', 'Çekim':'', 'Çekin':'', 'Çelem':'', 'Çelen':'', 'Çelenk':'', 'Çelik':'', 'Çelikel':'', 'Çeliker':'', 'Çelikiz':'', 'Çelikkol':'', 'Çeliköz':'', 'Çeliksu':'', 'Çelikten':'', 'Çeliktürk':'', 'Çelikyürek':'', 'Çelim':'', 'Çeltik':'', 'Çender':'', 'Çengiz':'', 'Çepni':'', 'Çerçi':'', 'Çeri':'', 'Çerkez':'', 'Çerme':'', 'Çetik':'', 'Çetin':'', 'Çetinel':'', 'Çetiner':'', 'Çetinok':'', 'Çetinöz':'', 'Çetinsoy':'', 
'Çetinsu':'', 'Çetintürk':'', 'Çetinyiğit':'', 'Çevik':'', 'Çevikel':'', 'Çeviker':'', 'Çeviköz':'', 'Çevrim':'', 'Çeyiz':'', 'Çığ':'', 'Çığıl':'', 'Çığır':'', 'Çıngı':'', 'Çıvgın':'', 'Çiçek':'', 'Çiftçi':'', 'Çiğdem':'', 'Çiğil':'', 'Çiğlez':'', 'Çilek':'', 'Çilen':'', 'Çilenti':'', 'Çiler':'', 'Çimen':'', 'Çin ':'', 'Çinel':'', 'Çiner':'', 'Çinerk':'', 'Çingiz':'', 'Çinkılıç':'', 'Çinuçin':'', 'Çisen':'', 'Çisil':'', 'Çoker':'', 'Çoku':'', 'Çopur':'', 'Çotuk':'', 'Çotur':'', 'Çökermiş':'', 'Çöyür':'', 'Demirgüç':'', 'Demirkoç':'', 'Demirpençe':'', 'Dikeç':'', 'Dinç':'', 'Dinçel':'', 'Dinçer':'', 'Dinçerk':'', 'Dinçkol':'', 'Dinçkök':'', 'Dinçmen':'', 'Dinçok':'', 'Dinçol':'', 'Dinçöz':'', 'Dinçsel':'', 'Dinçsoy':'', 'Dinçsü':'', 'Dinçtürk':'', 'Direnç':'', 'Elçi':'', 'Elçim':'', 'Elçin':'', 'Emeç':'', 'Enç':'', 'Eneç':'', 'Erçelik':'', 'Erçetin':'', 'Erçevik':'', 'Erçil':'', 'Erdinç':'', 'Erengüç':'', 'Ergenç':'', 'Ergüç':'', 'Ergüleç':'', 'Ergüvenç':'', 'Erinç':'', 'Erinçer':'', 'Erkılıç':'', 'Erkoç':'', 'Erseç':'', 'Ersevinç':'', 'Ertunç':'', 'Fereç':'', 'Genç':'', 'Gençel':'', 'Gençer':'', 'Gençsoy':'', 'Gençsu':'', 'Gençtürk':'', 'Gerçek':'', 'Gerçeker':'', 'Girginkoç':'', 'Göçen':'', 'Göçer':'', 'Göçmen':'', 'Göğünç':'', 'Gökçe':'', 'Gökçeer':'', 'Gökçek':'', 'Gökçel':'', 'Gökçem':'', 'Gökçen':'', 'Gökçer':'', 'Gökçesu':'', 'Gökçil':'', 'Gökçin':'', 'Gökçül':'', 'Gökçün':'', 'Göktunç':'', 'Gönç':'', 'Gönenç':'', 'Görgüç':'', 'Göyünç':'', 'Gözenç':'', 'Güç':'', 'Güçel':'', 'Güçeren':'', 'Güçermiş':'', 'Güçlü':'', 'Güçlüer':'', 'Güçlütürk':'', 'Güçmen':'', 'Güçsel':'', 'Güçyener':'', 'Güçyeter':'', 'Gülçe':'', 'Gülçehre':'', 'Gülçiçek':'', 'Gülçimen':'', 'Gülçin':'', 'Gülçün':'', 'Güleç':'', 'Güleçer':'', 'Gümeç':'', 'Günçe':'', 'Günçiçeği':'', 'Günçiçek':'', 'Güneç':'', 'Güvenç':'', 'Hiçsönmez':'', 'İçli':'', 'İçöz':'', 'İçten':'', 'İlçi':'', 'İlginç':'', 'Kılıç':'', 'Kılıçel':'', 'Kılıçer':'', 'Kılınç':'', 'Kırçiçek':'', 'Kızıltunç':'', 'Kiçi':'', 'Koç':'', 
'Koçer':'', 'Koçsoy':'', 'Koçtuğ':'', 'Koçtürk':'', 'Koçu':'', 'Koçyiğit':'', 'Konçuy':'', 'Köçeri':'', 'Lâçin':'', 'Mehmetçik':'', 'Mengüç':'', 'Meriç':'', 'Nurçin':'', 'Okçun':'', 'Okgüç':'', 'Okgüçlü':'', 'Oktunç':'', 'Olçun':'', 'Opçin':'', 'Orçun':'', 'Ortunç':'', 'Oruç':'', 'Oytunç':'', 'Öğrünç':'', 'Öğünç':'', 'Ölçüm':'', 'Ölçün':'', 'Öndünç':'', 'Öveç':'', 'Övgünç':'', 'Övünç':'', 'Özçelik':'', 'Özçevik':'', 'Özçın':'', 'Özdinç':'', 'Özdinçer':'', 'Özenç':'', 'Özerdinç':'', 'Özerinç':'', 'Özgenç':'', 'Özgüç':'', 'Özgüleç':'', 'Özkoç':'', 'Özokçu':'', 'Öztunç':'', 'Perçem':'', 'Periçehre':'', 'Pürçek':'', 'Seçen':'', 'Seçgül':'', 'Seçik':'', 'Seçil':'', 'Seçkin':'', 'Seçkiner':'', 'Seçme':'', 'Seçmeer':'', 'Seçmen':'', 'Seçmener':'', 'Selçuk':'', 'Selçuker':'', 'Selgüç':'', 'Serdengeçti':'', 'Serdinç':'', 'Sevinç':'', 'Sorguç':'', 'Soydinç':'', 'Soydinçer':'', 'Soyselçuk':'', 'Tekçe':'', 'Temuçin':'', 'Timuçin':'', 'Tonguç':'', 'Togay':'', 'Tuğçe':'', 'Tunç':'', 'Tunçdemir':'', 'Tunçel':'', 'Tunçer':'', 'Tunçkılıç':'', 'Tunçkol':'', 'Tunçkurt':'', 'Tunçok':'', 'Tunçöven':'', 'Tunçsoy':'', 'Tunçtürk':'', 'Tunguç':'', 'Tümkoç':'', 'Uç':'', 'Uçkun':'', 'Uçuk':'', 'Uçur':'', 'Uluç':'', 'Ulumeriç':'', 'Üçe':'', 'Üçel':'', 'Üçer':'', 'Üçgül':'', 'Üçışık':'', 'Üçkök':'', 'Üçok':'', 'Üçük':'', 'Ünüçok':'', 'Yoruç':'', 'Âdem':'', 'Dâhi':'', 'Dânâ':'', 'Dede':'', 'Define':'', 'Defne':'', 'Değer':'', 'Değmeer':'', 'Dehri':'', 'Delâl':'', 'Demet':'', 'Demhoş':'', 'Demir':'', 'Demirdelen':'', 'Demirdöven':'', 'Demirel':'', 'Demirer':'', 'Demirezen':'', 'Demirgülle':'', 'Demiriz':'', 'Demirkol':'', 'Demirkök':'', 'Demirkurt':'', 'Demirkut':'', 'Demirok':'', 'Demirol':'', 'Demiröz':'', 'Demirsoy':'', 'Demirtekin':'', 'Demirtuğ':'', 'Demirtürk':'', 'Demiryürek':'', 'Demren':'', 'Dengiz':'', 'Dengizer':'', 'Deniz':'', 'Denizel':'', 'Denizer':'', 'Denizmen':'', 'Deniztekin':'', 'Denk':'', 'Denkel':'', 'Denker':'', 'Denli':'', 'Denlisoy':'', 'Deren':'', 'Derenel':'', 
'Derin':'', 'Derinkök':'', 'Derinöz':'', 'Derlen':'', 'Derviş':'', 'Deste':'', 'Destegül':'', 'Devin':'', 'Deviner':'', 'Devlettin':'', 'Devrim':'', 'Devrimer':'', 'Didem':'', 'Didim':'', 'Dik':'', 'Dikel':'', 'Diken':'', 'Diker':'', 'Dikey':'', 'Dikmen':'', 'Diksoy':'', 'Dil':'', 'Dilâ':'', 'Dilân':'', 'Dilâşup':'', 'Dilâver':'', 'Dilderen':'', 'Dilefruz':'', 'Dilege':'', 'Dilek':'', 'Dilem':'', 'Dilemre':'', 'Diler':'', 'Dilfigâr':'', 'Dilfiruz':'', 'Dilge':'', 'Dilhun':'', 'Dilhuş':'', 'Dilmen':'', 'Dilnişin':'', 'Dilnur':'', 'Dilsuz':'', 'Dilşen':'', 'Dilşikâr':'', 'Dilyâr':'', 'Diren':'', 'Diri':'', 'Dirik':'', 'Diriker':'', 'Dirikök':'', 'Diril':'', 'Dirim':'', 'Dirimtekin':'', 'Dirin':'', 'Diriner':'', 'Dirisoy':'', 'Dirlik':'', 'Doğru':'', 'Doğruel':'', 'Doğruer':'', 'Doğruol':'', 'Doğruöz':'', 'Doğuer':'', 'Doğuş':'', 'Dolun':'', 'Doru':'', 'Doruk':'', 'Dorukkurt':'', 'Dorukkut':'', 'Doruktekin':'', 'Doruktepe':'', 'Dost':'', 'Dölek':'', 'Dölen':'', 'Dölensoy':'', 'Döndü':'', 'Döne':'', 'Dönmez':'', 'Dönmezer':'', 'Dönmezsoy':'', 'Dönmeztekin':'', 'Dönü':'', 'Dönüş':'', 'Dudu':'', 'Duhter':'', 'Dumlu':'', 'Dumrul':'', 'Durdu':'', 'Durgun':'', 'Durguner':'', 'Durgunsu':'', 'Durkız':'', 'Durmuş':'', 'Dursun':'', 'Dursune':'', 'Durşen':'', 'Duru':'', 'Durugül':'', 'Duruiz':'', 'Duruk':'', 'Durul':'', 'Duruöz':'', 'Durusel':'', 'Durusoy':'', 'Durusu':'', 'Durutekin':'', 'Durutürk':'', 'Duşize':'', 'Duygu':'', 'Duygun':'', 'Duyu':'', 'Dülge':'', 'Dülger':'', 'Düri':'', 'Düriye':'', 'Dürnev':'', 'Dürri':'', 'Düşün':'', 'Düşünsel':'', 'Düzel':'', 'Düzey':'', 'Düzgün':'', 'Ede':'', 'Edgü':'', 'Edgüer':'', 'Edhem':'', 'Edip':'', 'Edis':'', 'Ediz':'', 'Efendi':'', 'Efgende':'', 'Ehed':'', 'Ejder':'', 'Eldem':'', 'Eldemir':'', 'Elidemir':'', 'Elverdi':'', 'Ender':'', 'Erdem':'', 'Erdemer':'', 'Erdemir':'', 'Erdemli':'', 'Erden':'', 'Erdener':'', 'Erdeniz':'', 'Erdeşir':'', 'Er':'', 'Erdi':'', 'Erdil':'', 'Erdilek':'', 'Erdin':'', 'Erdiner':'', 'Erdoğ':'', 
'Erdoğdu':'', 'Erdoğmuş':'', 'Erdöl':'', 'Erdölek':'', 'Erdönmez':'', 'Erdur':'', 'Erdurdu':'', 'Erdurmuş':'', 'Erdursun':'', 'Erduru':'', 'Erendemir':'', 'Erendiz':'', 'Ergüden':'', 'Ergüder':'', 'Eryıldız':'', 'Esendemir':'', 'Evdegül':'', 'Ferdi':'', 'Ferdiye':'', 'Ferhunde':'', 'Feride':'', 'Feridun':'', 'Fermude':'', 'Ferzend':'', 'Fide':'', 'Firdevs':'', 'Firdevsi':'', 'Füruzende':'', 'Gedik':'', 'Gediz':'', 'Gökdemir':'', 'Gökdeniz':'', 'Göndem':'', 'Gönder':'', 'Gönülden':'', 'Gönüldeş':'', 'Gözde':'', 'Güdek':'', 'Güder':'', 'Güldeğer':'', 'Güldehen':'', 'Güldem':'', 'Güldemet':'', 'Gülden':'', 'Güldeniz':'', 'Güldenur':'', 'Gülder':'', 'Gülderen':'', 'Güldermiş':'', 'Güldeste':'', 'Güldilek':'', 'Güldöne':'', 'Güldüren':'', 'Gülender':'', 'Gülfide':'', 'Gündemir':'', 'Günden':'', 'Gündeniz':'', 'Günder':'', 'Gündeş':'', 'Gündoğdu':'', 'Gündoğmuş':'', 'Gündöndü':'', 'Gündüz':'', 'Güngördü':'', 'Güzide':'', 'Hediye':'', 'Hemdem':'', 'Hıdır':'', 'Hurşide':'', 'Idık':'', 'Idıkut':'', 'Ildır':'', 'Ildız':'', 'İdi':'', 'İdikurt':'', 'İdikut':'', 'İdil':'', 'İdris':'', 'İğdemir':'', 'İldem':'', 'İldemer':'', 'İldemir':'', 'İlden':'', 'İldeniz':'', 'İldeş':'', 'İskender':'', 'İşgüden':'', 'İşgüder':'', 'Jülide':'', 'Kızıldemir':'', 'Kuddus':'', 'Kuddusi':'', 'Kudret':'', 'Kudsiye':'', 'Ledün':'', 'Medeni':'', 'Medet':'', 'Medide':'', 'Medih':'', 'Medine':'', 'Medit':'', 'Mehdi':'', 'Mehdiye':'', 'Melodi':'', 'Memduh':'', 'Menderes':'', 'Merdi':'', 'Mesude':'', 'Mevdut':'', 'Mevlide':'', 'Mevlûde':'', 'Mevlüde':'', 'Mezide':'', 'Muktedir':'', 'Muslihiddin':'', 'Müderris':'', 'Müdrik':'', 'Müdrike':'', 'Müeddep':'', 'Müfide':'', 'Müjde':'', 'Müldür':'', 'Müride':'', 'Mürşide':'', 'Müveddet':'', 'Nedim':'', 'Nedime':'', 'Nedret':'', 'Neşide':'', 'Nevide':'', 'Nurdide':'', 'Nurdil':'', 'Nurdoğdu':'', 'Nurfide':'', 'Nüvide':'', 'Od':'', 'Oder':'', 'Okdemir':'', 'Okverdi':'', 'Orkide':'', 'Ödül':'', 'Önder':'', 'Önderol':'', 'Öndeş':'', 'Öründü':'', 'Özdeğer':'', 
'Özdek':'', 'Özdel':'', 'Özdemir':'', 'Özden':'', 'Özdener':'', 'Özderen':'', 'Özdeş':'', 'Özdil':'', 'Özdilek':'', 'Özdoğdu':'', 'Özdoğmuş':'', 'Özdoğru':'', 'Özdoru':'', 'Özdoruk':'', 'Özdurdu':'', 'Özduru':'', 'Özdurul':'', 'Özdurum':'', 'Özender':'', 'Özerdem':'', 'Özerdim':'', 'Özönder':'', 'Özüdoğru':'', 'Özverdi':'', 'Pekdeğer':'', 'Peride':'', 'Remide':'', 'Reside':'', 'Reşide':'', 'Reşididdin':'', 'Rüveyde':'', 'Rüvide':'', 'Sedef':'', 'Seden':'', 'Sedit':'', 'Semender':'', 'Serdil':'', 'Sevde':'', 'Sevdiye':'', 'Sevgideğer':'', 'Sevindik':'', 'Seydi':'', 'Seyyide':'', 'Sıddık':'', 'Sıdkı':'', 'Sıdkıye':'', 'Sidre':'', 'Simden':'', 'Sude':'', 'Sudi':'', 'Sudiye':'', 'Suudi':'', 'Suzidil':'', 'Süerdem':'', 'Süerden':'', 'Sündüs':'', 'Süveyde':'', 'Şemdin':'', 'Şendeniz':'', 'Şendil':'', 'Şendur':'', 'Şengeldi':'', 'Şermende':'', 'Şevkidil':'', 'Şide':'', 'Tedü':'', 'Tendü':'', 'Tendürek':'', 'Tepedelen':'', 'Tevhiddin':'', 'Tevhide':'', 'Tokdemir':'', 'Topdemir':'', 'Toydemir':'', 'Toydeniz':'', 'Tümerdem':'', 'Türkdoğdu':'', 'Ufukdeniz':'', 'Uldız':'', 'Umdu':'', 'Urundu':'', 'Ülküdeş':'', 'Üngördü':'', 'Ünüdeğer':'', 'Ünverdi':'', 'Üründü':'', 'Vedi':'', 'Vedide':'', 'Vedit':'', 'Velide':'', 'Veliyüddin':'', 'Verdî':'', 'Yârıdil':'', 'Yedier':'', 'Yediger':'', 'Yediveren':'', 'Yıldıku':'', 'Yıldır':'', 'Yıldırer':'', 'Yıldırım':'', 'Yıldız':'', 'Yurdum':'', 'Yurdusev':'', 'Yurduşen':'', 'Zeyneddin':'', 'Zühdi':'', 'Âlem':'', 'Âsiye':'', 'Âtike':'', 'Âtiye':'', 'Âzime':'', 'Efe':'', 'Efgen':'', 'Efkâr':'', 'Eflâtun':'', 'Efruz':'', 'Efser':'', 'Efsun':'', 'Egemen':'', 'Egenur':'', 'Egesel':'', 'Eğilmez':'', 'Eğrek':'', 'Ehil':'', 'Ehlimen':'', 'Eke':'', 'Ekemen':'', 'Eken':'', 'Ekenel':'', 'Ekener':'', 'Ekin':'', 'Ekiner':'', 'Ekmel':'', 'Ekrem':'', 'Elâ':'', 'Elânur':'', 'Elgin':'', 'Elif':'', 'Elife':'', 'Elik':'', 'Elitez':'', 'Eliuz':'', 'Eliüstün':'', 'Elöve':'', 'Elöver':'', 'Elver':'', 'Elveren':'', 'Emek':'', 'Emel':'', 'Emet':'', 'Emin':'', 
'Emine':'', 'Eminel':'', 'Emir':'', 'Emoş':'', 'Emre':'', 'Emri':'', 'Emriye':'', 'Ener':'', 'Eneren':'', 'Energin':'', 'Enes':'', 'Enfes':'', 'Engin':'', 'Enginel':'', 'Enginer':'', 'Enginiz':'', 'Enginsoy':'', 'Enginsu':'', 'Engiz':'', 'Engür':'', 'Enis':'', 'Enise':'', 'Enmutlu':'', 'Enver':'', 'Erek':'', 'Ereken':'', 'Erel':'', 'Erem':'', 'Eren':'', 'Erenel':'', 'Erengül':'', 'Erengün':'', 'Erenler':'', 'Erenöz':'', 'Erensoy':'', 'Erensü':'', 'Erentürk':'', 'Erenuluğ':'', 'Erer':'', 'Erge':'', 'Ergem':'', 'Ergen':'', 'Ergenekon':'', 'Ergener':'', 'Ergi':'', 'Ergil':'', 'Ergin':'', 'Erginel':'', 'Erginer':'', 'Erginsoy':'', 'Ergintuğ':'', 'Ergök':'', 'Ergökmen':'', 'Ergönen':'', 'Ergönül':'', 'Ergör':'', 'Ergun':'', 'Erguner':'', 'Ergül':'', 'Ergülen':'', 'Ergüler':'', 'Ergümen':'', 'Ergün':'', 'Ergüner':'', 'Ergüneş':'', 'Ergüney':'', 'Ergüven':'', 'Erhun':'', 'Erışık':'', 'Erik':'', 'Eriker':'', 'Erim':'', 'Erimel':'', 'Erimer':'', 'Erin':'', 'Erip':'', 'Eripek':'', 'Eriş':'', 'Erişen':'', 'Erişkin':'', 'Eriz':'', 'Erk':'', 'Erke':'', 'Erkel':'', 'Erker':'', 'Erkın':'', 'Erkınel':'', 'Erkış':'', 'Erkin':'', 'Erkinel':'', 'Erkiner':'', 'Erkmen':'', 'Erkmenol':'', 'Erkol':'', 'Erksoy':'', 'Erksun':'', 'Erktin':'', 'Erkul':'', 'Erkunt':'', 'Erkurt':'', 'Erkuş':'', 'Erkut':'', 'Erkutlu':'', 'Erlik':'', 'Ermiş':'', 'Ermiye':'', 'Ermutlu':'', 'Ernur':'', 'Eroğlu':'', 'Eroğul':'', 'Eroğuz':'', 'Erol':'', 'Eröge':'', 'Eröz':'', 'Ersel':'', 'Ersen':'', 'Erserim':'', 'Ersev':'', 'Erseven':'', 'Ersever':'', 'Ersevin':'', 'Ersezen':'', 'Ersezer':'', 'Ersin':'', 'Erson':'', 'Ersoy':'', 'Ersöz':'', 'Ersu':'', 'Ersun':'', 'Ersü':'', 'Erşen':'', 'Erşet':'', 'Erte':'', 'Ertek':'', 'Erteke':'', 'Ertekin':'', 'Ertem':'', 'Erten':'', 'Ertim':'', 'Ertin':'', 'Ertingü':'', 'Ertok':'', 'Ertop':'', 'Ertöre':'', 'Ertuğ':'', 'Ertuğrul':'', 'Ertut':'', 'Ertün':'', 'Ertüre':'', 'Ertürk':'', 'Ertüze':'', 'Ertüzün':'', 'Erülgen':'', 'Erün':'', 'Erüstün':'', 'Ervin':'', 'Eryetiş':'', 
'Eryiğit':'', 'Erzi':'', 'Ese':'', 'Esen':'', 'Esenel':'', 'Esener':'', 'Esengül':'', 'Esengün':'', 'Esenkul':'', 'Esentimur':'', 'Esentürk':'', 'Eser':'', 'Esergül':'', 'Esim':'', 'Esin':'', 'Esiner':'', 'Eskin':'', 'Eslek':'', 'Esmer':'', 'Esvet':'', 'Eşe':'', 'Eşim':'', 'Eşin':'', 'Eşit':'', 'Eşkin':'', 'Eşmen':'', 'Eşref':'', 'Ethem':'', 'Eti':'', 'Etik':'', 'Etike':'', 'Evgin':'', 'Evin':'', 'Evirgen':'', 'Evnur':'', 'Evren':'', 'Evrensel':'', 'Evrim':'', 'Evrimer':'', 'Evsen':'', 'Evşen':'', 'Eylem':'', 'Eymen':'', 'Eyüp':'', 'Ezel':'', 'Ezelî':'', 'Ezgi':'', 'Ezgin':'', 'Ezgü':'', 'Ezgüer':'', 'Ezgütekin':'', 'Fâkihe':'', 'Fehim':'', 'Fehime':'', 'Fehmi':'', 'Fehmiye':'', 'Felât':'', 'Felek':'', 'Fenni':'', 'Fenniye':'', 'Fer':'', 'Fergün':'', 'Ferhun':'', 'Feri':'', 'Ferih':'',
# <gh_stars>1-10  -- stray dataset artifact, not valid Python; kept as a comment
"""
Utils for Postgres. Most useful are:
:func:`read_from_pg`, :func:`write_to_pg`, :func:`execute_batch`
"""
from typing import Dict, List, Set, Optional, Iterator, Iterable, Any
from contextlib import contextmanager
from itertools import chain
from logging import Logger
import jaydebeapi
import datetime
import string
import functools

from pyspark.sql import SQLContext
from pyspark import SparkContext
from pyspark.sql import DataFrame

from pyspark_db_utils.utils.drop_columns import drop_other_columns


def read_from_pg(config: dict, sql: str, sc: SparkContext,
                 logger: Optional[Logger] = None) -> DataFrame:
    """ Read dataframe from postgres.

    Args:
        config: settings for connect (must contain 'PG_URL' and 'PG_PROPERTIES')
        sql: sql to read, it may be one of these formats:

            - 'table_name'
            - 'schema_name.table_name'
            - '(select a, b, c from t1 join t2 ...) as foo'

        sc: current spark context
        logger: optional logger

    Returns:
        selected DF (cached)
    """
    if logger:
        logger.info('read_from_pg:\n{}'.format(sql))
    sql_context = SQLContext(sc)
    df = sql_context.read.format("jdbc").options(
        url=config['PG_URL'],
        dbtable=sql,
        **config['PG_PROPERTIES']
    ).load().cache()
    return df


def write_to_pg(df: DataFrame, config: dict, table: str, mode: str = 'append',
                logger: Optional[Logger] = None) -> None:
    """ Write dataframe to postgres.

    Args:
        df: DataFrame to write
        config: config dict
        table: table_name
        logger: optional logger
        mode: one of:

            - append - create table if not exists (with all columns of DataFrame)
              and write records to table (using fields only in table columns)
            - overwrite - truncate table (if exists) and write records
              (using fields only in table columns)
            - overwrite_full - drop table and create new one with all columns of
              DataFrame and append records to it
            - fail - fail if table does not exist, otherwise append records to it

    Raises:
        Exception: in 'fail' mode when the table does not exist
    """
    field_names = get_field_names(table, config)
    # information_schema returns no columns for a missing table
    table_exists = bool(field_names)
    if mode == 'fail':
        if not table_exists:
            raise Exception('table {} does not exist'.format(table))
        else:
            mode = 'append'  # if table exists just append records to it
    if mode == 'append':
        if table_exists:
            df = drop_other_columns(df, field_names)
    elif mode == 'overwrite_full':
        if table_exists:
            run_sql('drop table {}'.format(table), config, logger=logger)
    elif mode == 'overwrite':
        if table_exists:
            df = drop_other_columns(df, field_names)
            run_sql('truncate {}'.format(table), config, logger=logger)
    df.write.jdbc(url=config['PG_URL'],
                  table=table,
                  mode='append',  # always just append because all logic already done
                  properties=config['PG_PROPERTIES'])


def run_sql(sql: str, config: Dict, logger: Optional[Logger] = None) -> None:
    """ Just run sql (autocommitted). """
    if logger:
        logger.info('run_sql: {}'.format(sql))
    with jdbc_connect(config, autocommit=True) as (conn, curs):
        curs.execute(sql)


def get_field_names(table_name: str, config: Dict) -> Set[str]:
    """ Get column names of a table (empty set if the table does not exist). """
    # information_schema.columns keys on the bare table name, so strip the schema
    if len(table_name.split('.')) > 1:
        table_name = table_name.split('.')[-1]
    with jdbc_connect(config) as (conn, cur):
        # NOTE(security): table_name is interpolated into SQL; callers must pass
        # trusted identifiers only (JDBC here offers no identifier quoting helper)
        sql = "SELECT column_name FROM information_schema.columns WHERE table_name='{}'".format(table_name)
        cur.execute(sql)
        res = cur.fetchall()
    field_names = list(chain(*res))
    return set(field_names)


def get_field_names_stub(df: DataFrame, config: Dict, table_name: str,
                         sc: SparkContext) -> Set[str]:
    """ Get field names of table via a probe SELECT.

    ! DONT USE IT ! Use get_field_names instead !
    TODO: replace with get_field_names
    """
    sql = '(select * from {} limit 1) as smth'.format(table_name)
    df_tmp = read_from_pg(config, sql, sc)
    columns_in_db = set(df_tmp.columns)
    columns_in_df = set(df.columns)
    field_names = columns_in_db.intersection(columns_in_df)
    return set(field_names)


@contextmanager
def jdbc_connect(config: Dict, autocommit: bool = False):
    """ Context manager, opens and closes connection correctly.

    Args:
        config: config (uses 'PG_PROPERTIES', 'PG_URL', 'PG_DRIVER_PATH')
        autocommit: enable autocommit

    Yields:
        tuple: connection, cursor
    """
    conn = jaydebeapi.connect(config["PG_PROPERTIES"]['driver'],
                              config["PG_URL"],
                              {'user': config["PG_PROPERTIES"]['user'],
                               'password': config["PG_PROPERTIES"]['password']},
                              config["PG_DRIVER_PATH"])
    if not autocommit:
        conn.jconn.setAutoCommit(False)
    curs = conn.cursor()
    # FIX: close cursor/connection even when the managed block raises;
    # previously an exception inside the `with` leaked both handles
    try:
        yield conn, curs
    finally:
        curs.close()
        conn.close()


def mogrify(val) -> str:
    """ Cast python values to raw-sql literals, escaping where necessary.

    Args:
        val: value of a supported type (str, int, float, datetime, date, None)

    Returns:
        mogrified value as an SQL literal string

    Raises:
        TypeError: for unsupported types
    """
    if isinstance(val, str):
        escaped = val.replace("'", "''")  # standard SQL single-quote doubling
        return "'{}'".format(escaped)
    elif isinstance(val, (int, float)):
        return str(val)
    elif isinstance(val, datetime.datetime):
        # checked before datetime.date: datetime is a date subclass
        return "'{}'::TIMESTAMP".format(val)
    elif isinstance(val, datetime.date):
        return "'{}'::DATE".format(val)
    elif val is None:
        return 'null'
    else:
        raise TypeError('unknown type {} for mogrify'.format(type(val)))


class MogrifyFormatter(string.Formatter):
    """ Custom formatter: `{field}` pulls `row[field]` and mogrifies it. """

    def get_value(self, key, args, kwargs) -> str:
        row = args[0]
        return mogrify(row[key])


def batcher(iterable: Iterable, batch_size: int):
    """ Yield batches (lists) of at most batch_size items of iterable.

    Args:
        iterable: something to batch
        batch_size: batch size

    Yields:
        batch, until end of iterable (last batch may be shorter)
    """
    batch = []
    for obj in iterable:
        batch.append(obj)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch


mogrifier = MogrifyFormatter()


def _execute_batch_partition(partition: Iterator, sql_temp: str, config: Dict,
                             batch_size: int) -> None:
    """ Execute sql_temp for rows in partition, in batches. """
    with jdbc_connect(config) as (conn, curs):
        for batch in batcher(partition, batch_size):
            # one round-trip per batch: statements joined with ';'
            sql = ';'.join(
                mogrifier.format(sql_temp, row)
                for row in batch
            )
            curs.execute(sql)
            conn.commit()


def execute_batch(df: DataFrame, sql_temp: str, config: Dict,
                  batch_size: int = 1000) -> None:
    """ Run custom SQL (UPDATE / DELETE / etc.) per row of DataFrame, in batches.

    Attention! sql_temp uses {}-formatting: ``{field}`` is replaced by the
    mogrified field value for each row.  For static formatting (table names,
    constants) pre-format the template with %()s-style formatting instead.

    Examples:
        update table rows by id and values for DF records::

            >> execute_batch(df, config=config,
                             sql_temp='update %(table_name)s set out_date=%(filename_date)s where id={id}'
                                      % {'table_name': table_name, 'filename_date': filename_date})

        update table rows fields by complex sql expression::

            >> execute_batch(df=df,
                             sql_temp='''
                                UPDATE reporting.cases c
                                SET close_date = {check_date_time},
                                    status = 2,
                                    lost_sales = EXTRACT(epoch FROM {check_date_time} - c.start_date) * (3.0 / 7) / (24 * 3600)
                                WHERE c.id = {id}
                             ''',
                             config=config)
    """
    df.foreachPartition(
        functools.partial(_execute_batch_partition, sql_temp=sql_temp,
                          config=config, batch_size=batch_size))


def _update_many_partition(partition: Iterator,
                           table_name: str,
                           set_to: Dict[str, Any],
                           config: Dict,
                           batch_size: int,
                           id_field: str = 'id'
                           ) -> None:
    """ Update rows in partition, setting some fields to the same new values.

    Args:
        partition: DataFrame partition
        table_name: table name
        set_to: dict such as {'field_name1': new_value1, 'field_name2': new_value2}
        config: config
        batch_size: batch size
        id_field: id field
    """
    field_stmt_list = []
    for field_name, new_value in set_to.items():
        field_stmt_list.append('{}={}'.format(field_name, mogrify(new_value)))
    fields_stmt = ', '.join(field_stmt_list)
    with jdbc_connect(config) as (conn, curs):
        for batch in batcher(partition, batch_size):
            ids = [row[id_field] for row in batch]
            if not ids:
                break
            ids_str = ', '.join(str(id_) for id_ in ids)
            # FIX: WHERE clause previously hard-coded the column name 'id',
            # ignoring id_field and updating the wrong rows for id_field != 'id'
            sql = 'UPDATE {} SET {} WHERE {} IN ({})'.format(
                table_name, fields_stmt, id_field, ids_str)
            curs.execute(sql)
            conn.commit()


def update_many(df: DataFrame,
                table_name: str,
                set_to: Dict,
                config: Dict,
                batch_size: int = 1000,
                id_field: str = 'id'
                ) -> None:
    """ Update rows in DataFrame: set some fields to new constant values.

    Note: this function updates fields to constant values; if you need an
    update-sql-expression, use execute_batch.

    Args:
        df: DataFrame
        table_name: table name
        set_to: dict such as {'field_name1': new_const_value1, 'field_name2': new_const_value2}
        config: config
        batch_size: batch size
        id_field: id field
    """
    df.foreachPartition(
        functools.partial(_update_many_partition, table_name=table_name,
                          set_to=set_to, config=config,
                          batch_size=batch_size, id_field=id_field))


def _insert_values_partition(partition: Iterator, sql_temp: str, values_temp: str,
                             config: Dict, batch_size: int,
                             fields_stmt: Optional[str] = None,
                             table_name: Optional[str] = None,
                             logger: Optional[Logger] = None):
    """ Insert rows from partition.

    Args:
        partition: DataFrame partition
        sql_temp: sql template (may contain values, fields, table_name formatting-arguments)
        values_temp: string template for values
        config: config
        batch_size: batch size
        fields_stmt: string template for fields
        table_name: table name argument for string-formatting
        logger: optional logger
    """
    with jdbc_connect(config) as (conn, curs):
        for batch in batcher(partition, batch_size):
            values = ','.join(
                mogrifier.format(values_temp, row)
                for row in batch
            )
            sql = sql_temp.format(values=values, fields=fields_stmt,
                                  table_name=table_name)
            if logger:
                max_len = 1024  # avoid logging megabyte-sized statements
                logger.info('_insert_values_partition sql[:{}]: {}'.format(max_len, sql[:max_len]))
            curs.execute(sql)
            conn.commit()


def insert_values(df: DataFrame,
                  config: Dict,
                  batch_size: int = 1000,
                  fields: Optional[List[str]] = None,
                  values_temp: Optional[str] = None,
                  sql_temp: Optional[str] = None,
                  table_name: Optional[str] = None,
                  on_conflict_do_nothing: bool = False,
                  on_conflict_do_update: bool = False,
                  drop_duplicates: bool = False,
                  exclude_null_field: Optional[str] = None,
                  logger: Optional[Logger] = None,
                  sc: SparkContext = None
                  ) -> None:
    """ Insert rows from DataFrame.

    Note: Use write_to_pg as often as possible. Unfortunately, it is not able
    to use ON CONFLICT and ON UPDATE statements, so we are forced to write a
    custom function.

    Args:
        df: DataFrame
        sql_temp: sql template (may contain values, fields, table_name formatting-arguments)
        values_temp: string template for values
        config: config
        fields: list of columns for insert (if None, all columns will be used)
        batch_size: batch size
        table_name: table name argument for string-formatting
        on_conflict_do_nothing: add ON CONFLICT DO NOTHING statement to each INSERT
        on_conflict_do_update: add ON CONFLICT DO UPDATE statement to each INSERT
        drop_duplicates: drop duplicates if set to True
        exclude_null_field: exclude rows where field=exclude_null_field is null
        logger: logger
        sc: Spark context
    """
    # prevent changing original dataframe
    cleaned_df = df.select(*df.columns)
    # select columns to write
    if table_name:
        field_names = get_field_names_stub(df, config, table_name, sc)
        cleaned_df = df.select(*field_names)
    if drop_duplicates:
        cleaned_df = cleaned_df.dropDuplicates(drop_duplicates)
    if exclude_null_field:
        spark = SQLContext(sc)
        # NOTE(review): source is truncated at this point — the remainder of
        # insert_values (null filtering, SQL assembly, foreachPartition call)
        # is missing here; verify against the upstream pyspark_db_utils repo.
#!/usr/bin/env python

# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""Objects module contains objects that exist in the 'Objects' tab in the firewall GUI"""

import logging
import re
import xml.etree.ElementTree as ET

import panos
import panos.errors as err
from panos import getlogger
from panos.base import ENTRY, MEMBER, PanObject, Root
from panos.base import VarPath as Var
from panos.base import VersionedPanObject, VersionedParamPath

logger = getlogger(__name__)


class AddressObject(VersionedPanObject):
    """Address Object

    Args:
        name (str): Name of the object
        value (str): IP address or other value of the object
        type (str): Type of address:
                * ip-netmask (default)
                * ip-range
                * ip-wildcard (added in PAN-OS 9.0)
                * fqdn
        description (str): Description of this object
        tag (list): Administrative tags

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/address")

        # params
        params = []

        # value lives under an element named after the chosen type
        params.append(VersionedParamPath("value", path="{type}"))
        params.append(
            VersionedParamPath(
                "type",
                default="ip-netmask",
                values=["ip-netmask", "ip-range", "ip-wildcard", "fqdn"],
                path="{type}",
            )
        )
        params.append(VersionedParamPath("description", path="description"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class AddressGroup(VersionedPanObject):
    """Address Group

    Args:
        name (str): Name of the address group
        static_value (list): Values for a static address group
        dynamic_value (str): Registered-ip tags for a dynamic address group
        description (str): Description of this object
        tag (list): Administrative tags (not to be confused with registered-ip tags)

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/address-group")

        # params
        params = []

        params.append(
            VersionedParamPath("static_value", path="static", vartype="member")
        )
        params.append(VersionedParamPath("dynamic_value", path="dynamic/filter"))
        params.append(VersionedParamPath("description", path="description"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class Tag(VersionedPanObject):
    """Administrative tag

    Args:
        name (str): Name of the tag
        color (str): Color ID (eg. 'color1', 'color4', etc). You can
            use :func:`~panos.objects.Tag.color_code` to generate the ID.
        comments (str): Comments

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/tag")

        # params
        params = []

        params.append(VersionedParamPath("color", path="color"))
        params.append(VersionedParamPath("comments", path="comments"))

        self._params = tuple(params)

    @staticmethod
    def color_code(color_name):
        """Return the color code (e.g. 'color4') for a color name.

        Args:
            color_name (str): One of the following colors:

                    * red
                    * green
                    * blue
                    * yellow
                    * copper
                    * orange
                    * purple
                    * gray
                    * light green
                    * cyan
                    * light gray
                    * blue gray
                    * lime
                    * black
                    * gold
                    * brown

        Raises:
            ValueError: if the color name is not recognized

        """
        colors = {
            "red": 1,
            "green": 2,
            "blue": 3,
            "yellow": 4,
            "copper": 5,
            "orange": 6,
            "purple": 7,
            "gray": 8,
            "light green": 9,
            "cyan": 10,
            "light gray": 11,
            "blue gray": 12,
            "lime": 13,
            "black": 14,
            "gold": 15,
            "brown": 16,
            "olive": 17,
            # there is no color18
            "maroon": 19,
            "red-orange": 20,
            "yellow-orange": 21,
            "forest green": 22,
            "turquoise blue": 23,
            "azure blue": 24,
            "cerulean blue": 25,
            "midnight blue": 26,
            "medium blue": 27,
            "cobalt blue": 28,
            "violet blue": 29,
            "blue violet": 30,
            "medium violet": 31,
            "medium rose": 32,
            "lavender": 33,
            "orchid": 34,
            "thistle": 35,
            "peach": 36,
            "salmon": 37,
            "magenta": 38,
            "red violet": 39,
            "mahogany": 40,
            "burnt sienna": 41,
            "chestnut": 42,
        }
        if color_name not in colors:
            raise ValueError("Color '{0}' is not valid".format(color_name))
        return "color" + str(colors[color_name])


class ServiceObject(VersionedPanObject):
    """Service Object

    Args:
        name (str): Name of the object
        protocol (str): Protocol of the service, either tcp or udp
        source_port (str): Source port of the protocol, if any
        destination_port (str): Destination port of the service
        description (str): Description of this object
        tag (list): Administrative tags

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/service")

        # params
        params = []

        params.append(
            VersionedParamPath(
                "protocol",
                path="protocol/{protocol}",
                values=["tcp", "udp"],
                default="tcp",
            )
        )
        params.append(
            VersionedParamPath("source_port", path="protocol/{protocol}/source-port")
        )
        params.append(
            VersionedParamPath("destination_port", path="protocol/{protocol}/port")
        )
        params.append(VersionedParamPath("description", path="description"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class ServiceGroup(VersionedPanObject):
    """ServiceGroup Object

    Args:
        name (str): Name of the object
        value (list): List of service values
        tag (list): Administrative tags

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/service-group")

        # params
        params = []

        params.append(VersionedParamPath("value", path="members", vartype="member"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class ApplicationObject(VersionedPanObject):
    """Application Object

    Args:
        name (str): Name of the object
        category (str): Application category
        subcategory (str): Application subcategory
        technology (str): Application technology
        risk (int): Risk (1-5) of the application
        default_type (str): Default identification type of the application
        default_port (list): Default ports (when default_type is 'port')
        default_ip_protocol (str): Default IP protocol
            (when default_type is 'ident-by-ip-protocol')
        default_icmp_type (int): Default ICMP type (when default_type is
            'ident-by-icmp-type' or 'ident-by-icmp6-type')
        default_icmp_code (int): Default ICMP code (when default_type is
            'ident-by-icmp-type' or 'ident-by-icmp6-type')
        parent_app (str): Parent Application for which this app falls under
        timeout (int): Default timeout
        tcp_timeout (int): TCP timeout
        udp_timeout (int): UDP timeout
        tcp_half_closed_timeout (int): TCP half closed timeout
        tcp_time_wait_timeout (int): TCP wait time timeout
        evasive_behavior (bool): Application is actively evasive
        consume_big_bandwidth (bool): Application uses large bandwidth
        used_by_malware (bool): Application is used by malware
        able_to_transfer_file (bool): Application can do file transfers
        has_known_vulnerability (bool): Application has known vulnerabilities
        tunnel_other_application (bool):
        tunnel_applications (list): List of tunneled applications
        prone_to_misuse (bool):
        pervasive_use (bool):
        file_type_ident (bool):
        virus_ident (bool):
        data_ident (bool):
        description (str): Description of this object
        tag (list): Administrative tags

    Please refer to https://applipedia.paloaltonetworks.com/ for more info on these params

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/application")

        # params
        params = []

        params.append(VersionedParamPath("category", path="category"))
        params.append(VersionedParamPath("subcategory", path="subcategory"))
        params.append(VersionedParamPath("technology", path="technology"))
        params.append(VersionedParamPath("risk", path="risk", vartype="int"))
        params.append(
            VersionedParamPath(
                "default_type",
                path="default/{default_type}",
                values=[
                    "port",
                    "ident-by-ip-protocol",
                    "ident-by-icmp-type",
                    "ident-by-icmp6-type",
                ],
            )
        )
        # the default_* params below all share the default/{default_type} xpath
        # and are gated by `condition` on the chosen default_type
        params.append(
            VersionedParamPath(
                "default_port",
                path="default/{default_type}",
                vartype="member",
                condition={"default_type": "port"},
            )
        )
        params.append(
            VersionedParamPath(
                "default_ip_protocol",
                path="default/{default_type}",
                condition={"default_type": "ident-by-ip-protocol"},
            )
        )
        params.append(
            VersionedParamPath(
                "default_icmp_type",
                path="default/{default_type}/type",
                vartype="int",
                condition={
                    "default_type": ["ident-by-icmp-type", "ident-by-icmp6-type"]
                },
            )
        )
        params.append(
            VersionedParamPath(
                "default_icmp_code",
                path="default/{default_type}/code",
                vartype="int",
                condition={
                    "default_type": ["ident-by-icmp-type", "ident-by-icmp6-type"]
                },
            )
        )
        params.append(VersionedParamPath("parent_app", path="parent-app"))
        params.append(VersionedParamPath("timeout", path="timeout", vartype="int"))
        params.append(
            VersionedParamPath("tcp_timeout", path="tcp-timeout", vartype="int")
        )
        params.append(
            VersionedParamPath("udp_timeout", path="udp-timeout", vartype="int")
        )
        params.append(
            VersionedParamPath(
                "tcp_half_closed_timeout", path="tcp-half-closed-timeout", vartype="int"
            )
        )
        params.append(
            VersionedParamPath(
                "tcp_time_wait_timeout", path="tcp-time-wait-timeout", vartype="int"
            )
        )
        params.append(
            VersionedParamPath(
                "evasive_behavior", path="evasive-behavior", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "consume_big_bandwidth", path="consume-big-bandwidth", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "used_by_malware", path="used-by-malware", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "able_to_transfer_file", path="able-to-transfer-file", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "has_known_vulnerability",
                path="has-known-vulnerability",
                vartype="yesno",
            )
        )
        params.append(
            VersionedParamPath(
                "tunnel_other_application",
                path="tunnel-other-application",
                vartype="yesno",
            )
        )
        params.append(
            VersionedParamPath(
                "tunnel_applications", path="tunnel-applications", vartype="member"
            )
        )
        params.append(
            VersionedParamPath(
                "prone_to_misuse", path="prone-to-misuse", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath("pervasive_use", path="pervasive-use", vartype="yesno")
        )
        params.append(
            VersionedParamPath(
                "file_type_ident", path="file-type-ident", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath("virus_ident", path="virus-ident", vartype="yesno")
        )
        params.append(
            VersionedParamPath("data_ident", path="data-ident", vartype="yesno")
        )
        params.append(VersionedParamPath("description", path="description"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class ApplicationGroup(VersionedPanObject):
    """ApplicationGroup Object

    Args:
        name (str): Name of the object
        value (list): List of application values
        tag (list): Administrative tags

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/application-group")

        # params
        params = []

        params.append(VersionedParamPath("value", path="members", vartype="member"))
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class ApplicationFilter(VersionedPanObject):
    """ApplicationFilter Object

    Args:
        name (str): Name of the object
        category (list): Application category
        subcategory (list): Application subcategory
        technology (list): Application technology
        risk (list): Application risk
        evasive (bool):
        excessive_bandwidth_use (bool):
        prone_to_misuse (bool):
        is_saas (bool):
        transfers_files (bool):
        tunnels_other_apps (bool):
        used_by_malware (bool):
        has_known_vulnerabilities (bool):
        pervasive (bool):
        tag (list): Administrative tags

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/application-filter")

        # params
        params = []

        params.append(VersionedParamPath("category", path="category", vartype="member"))
        params.append(
            VersionedParamPath("subcategory", path="subcategory", vartype="member")
        )
        params.append(
            VersionedParamPath("technology", path="technology", vartype="member")
        )
        params.append(VersionedParamPath("risk", path="risk", vartype="member"))
        params.append(VersionedParamPath("evasive", path="evasive", vartype="yesno"))
        params.append(
            VersionedParamPath(
                "excessive_bandwidth_use",
                path="excessive-bandwidth-use",
                vartype="yesno",
            )
        )
        params.append(
            VersionedParamPath(
                "prone_to_misuse", path="prone-to-misuse", vartype="yesno"
            )
        )
        params.append(VersionedParamPath("is_saas", path="is-saas", vartype="yesno"))
        params.append(
            VersionedParamPath(
                "transfers_files", path="transfers-files", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "tunnels_other_apps", path="tunnels-other-apps", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "used_by_malware", path="used-by-malware", vartype="yesno"
            )
        )
        params.append(
            VersionedParamPath(
                "has_known_vulnerabilities",
                path="has-known-vulnerabilities",
                vartype="yesno",
            )
        )
        params.append(
            VersionedParamPath("pervasive", path="pervasive", vartype="yesno")
        )
        params.append(VersionedParamPath("tag", path="tag", vartype="member"))

        self._params = tuple(params)


class ApplicationContainer(VersionedPanObject):
    """ApplicationContainer object

    This is a special class that is used in the predefined module.
    It acts much like an ApplicationGroup object but exists only
    in the predefined context. It is more or less a way that
    Palo Alto groups predefined applications together.

    Args:
        applications (list): List of member applications

    """

    ROOT = Root.VSYS
    SUFFIX = ENTRY

    def _setup(self):
        # xpaths
        self._xpaths.add_profile(value="/application-container")

        # params
        params
        # NOTE(review): source is truncated here mid-statement — the remainder
        # of _setup (the params list and `self._params = tuple(params)`) is
        # missing; verify against the upstream pan-os-python objects module.
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Tests for Apache(TM) Bloodhound's product environments""" from inspect import stack import os.path import shutil from sqlite3 import OperationalError import sys import tempfile from types import MethodType if sys.version_info < (2, 7): import unittest2 as unittest from unittest2.case import _AssertRaisesContext else: import unittest from unittest.case import _AssertRaisesContext from trac.config import Option from trac.core import Component, ComponentMeta from trac.env import Environment from trac.test import EnvironmentStub, MockPerm from trac.tests.env import EnvironmentTestCase from trac.ticket.report import ReportModule from trac.ticket.web_ui import TicketModule from trac.util.text import to_unicode from trac.web.href import Href from multiproduct.api import MultiProductSystem from multiproduct.env import ProductEnvironment from multiproduct.model import Product class ProductEnvironmentStub(ProductEnvironment): r"""A product environment slightly tweaked for testing purposes """ def get_known_users(self, cnx=None): return self.known_users # FIXME: Subclass TestCase explictly ? 
class MultiproductTestCase(unittest.TestCase): r"""Mixin providing access to multi-product testing extensions. This class serves to the purpose of upgrading existing Trac test cases with multi-product super-powers while still providing the foundations to create product-specific subclasses. """ # unittest2 extensions exceptFailureMessage = None class _AssertRaisesLoggingContext(_AssertRaisesContext): """Add logging capabilities to assertRaises """ def __init__(self, expected, test_case, expected_regexp=None): _AssertRaisesContext.__init__(self, expected, test_case, expected_regexp) self.test_case = test_case @staticmethod def _tb_locals(tb): if tb is None: # Inspect interpreter stack two levels up ns = stack()[2][0].f_locals.copy() else: # Traceback already in context ns = tb.tb_frame.f_locals.copy() ns.pop('__builtins__', None) return ns def __exit__(self, exc_type, exc_value, tb): try: return _AssertRaisesContext.__exit__(self, exc_type, exc_value, tb) except self.failureException, exc: msg = self.test_case.exceptFailureMessage if msg is not None: standardMsg = str(exc) msg = msg % self._tb_locals(tb) msg = self.test_case._formatMessage(msg, standardMsg) raise self.failureException(msg) else: raise finally: # Clear message placeholder self.test_case.exceptFailureMessage = None def assertRaises(self, excClass, callableObj=None, *args, **kwargs): """Adds logging capabilities on top of unittest2 implementation. 
""" if callableObj is None: return self._AssertRaisesLoggingContext(excClass, self) else: return unittest.TestCase.assertRaises(self, excClass, callableObj, *args, **kwargs) # Product data default_product = 'tp1' MAX_TEST_PRODUCT = 3 PRODUCT_DATA = { 'tp1': { 'prefix': 'tp1', 'name': 'test product 1', 'description': 'desc for tp1', }, 'tp2': { 'prefix': 'tp2', 'name': 'test product 2', 'description': 'desc for tp2', }, u'xü': { 'prefix': u'xü', 'name': 'Non-ASCII chars', 'description': 'Unicode chars in name', }, u'Überflüssigkeit': { 'prefix': u'Überflüssigkeit', 'name': 'Non-ASCII chars (long)', 'description': 'Long name with unicode chars', }, 'Foo Bar': { 'prefix': 'Foo Bar', 'name': 'Whitespaces', 'description': 'Whitespace chars in name', }, 'Foo Bar#baz': { 'prefix': 'Foo Bar#baz', 'name': 'Non-alphanumeric', 'description': 'Special chars in name', }, 'pl/de': { 'prefix': 'pl/de', 'name': 'Path separator', 'description': 'URL path separator in name', }, } # Test setup def _setup_test_env(self, create_folder=True, path=None, **kwargs): r"""Prepare a new test environment . Optionally set its path to a meaningful location (temp folder if `path` is `None`). 
""" MultiProductSystem.FakePermClass = MockPerm kwargs.setdefault('enable', ['trac.*', 'multiproduct.*']) self.env = env = EnvironmentStub(**kwargs) if create_folder: if path is None: env.path = tempfile.mkdtemp('bh-product-tempenv') else: env.path = path if not os.path.exists(path): os.mkdir(path) return env def _setup_test_log(self, env): r"""Ensure test product with prefix is loaded """ logdir = tempfile.gettempdir() logpath = os.path.join(logdir, 'trac-testing.log') config = env.config config.set('logging', 'log_file', logpath) config.set('logging', 'log_type', 'file') config.set('logging', 'log_level', 'DEBUG') # Log SQL queries config.set('trac', 'debug_sql', True) config.save() env.setup_log() env.log.info('%s test case: %s %s', '-' * 10, self.id(), '-' * 10) # Clean-up logger instance and associated handler # Otherwise large test suites will only result in ERROR eventually # (at least in Unix systems) with messages # # TracError: Error reading '/path/to/file', make sure it is readable. # error: /path/to/: Too many open files self.addCleanup(self._teardown_test_log, env) def _teardown_test_log(self, env): if env.log and hasattr(env, '_log_handler'): env.log.removeHandler(env._log_handler) env._log_handler.flush() env._log_handler.close() del env._log_handler @classmethod def _load_product_from_data(cls, env, prefix): r"""Ensure test product with prefix is loaded """ # TODO: Use fixtures implemented in #314 product_data = cls.PRODUCT_DATA[prefix] prefix = to_unicode(prefix) product = Product(env) product._data.update(product_data) product.insert() @classmethod def _upgrade_mp(cls, env): r"""Apply multi product upgrades """ # Do not break wiki parser ( see #373 ) env.disable_component(TicketModule) env.disable_component(ReportModule) mpsystem = MultiProductSystem(env) try: mpsystem.upgrade_environment(env.db_transaction) except OperationalError: # Database is upgraded, but database version was deleted. # Complete the upgrade by inserting default product. 
mpsystem._insert_default_product(env.db_transaction) # assume that the database schema has been upgraded, enable # multi-product schema support in environment env.enable_multiproduct_schema(True) @classmethod def _load_default_data(cls, env): r"""Initialize environment with default data by respecting values set in system table. """ from trac import db_default env.log.debug('Loading default data') with env.db_transaction as db: for table, cols, vals in db_default.get_data(db): if table != 'system': db.executemany('INSERT INTO %s (%s) VALUES (%s)' % (table, ','.join(cols), ','.join(['%s' for c in cols])), vals) env.log.debug('Loaded default data') def _mp_setup(self, **kwargs): """Shortcut for quick product-aware environment setup. """ self.env = self._setup_test_env(**kwargs) self._upgrade_mp(self.env) self._setup_test_log(self.env) self._load_product_from_data(self.env, self.default_product) class ProductEnvTestCase(EnvironmentTestCase, MultiproductTestCase): r"""Test cases for Trac environments rewritten for product environments """ # Test setup def setUp(self): r"""Replace Trac environment with product environment """ EnvironmentTestCase.setUp(self) try: self.global_env = self.env self._setup_test_log(self.global_env) self._upgrade_mp(self.global_env) self._load_product_from_data(self.global_env, self.default_product) try: self.env = ProductEnvironment(self.global_env, self.default_product) except: # All tests should fail if anything goes wrong self.global_env.log.exception( 'Error creating product environment') self.env = None except: shutil.rmtree(self.env.path) raise def tearDown(self): # Discard product environment self.env = self.global_env EnvironmentTestCase.tearDown(self) class ProductEnvApiTestCase(MultiproductTestCase): """Assertions for Apache(TM) Bloodhound product-specific extensions in [https://issues.apache.org/bloodhound/wiki/Proposals/BEP-0003 BEP 3] """ def setUp(self): self._mp_setup() self.product_env = ProductEnvironment(self.env, 
self.default_product) def tearDown(self): # Release reference to transient environment mock object if self.env is not None: try: self.env.reset_db() except OperationalError: # "Database not found ...", # "OperationalError: no such table: system" or the like pass self.env = None self.product_env = None def test_attr_forward_parent(self): """Testing env.__getattr__""" class EnvironmentAttrSandbox(EnvironmentStub): """Limit the impact of class edits so as to avoid race conditions """ self.longMessage = True class AttrSuccess(Exception): """Exception raised when target method / property is actually invoked. """ def property_mock(attrnm, expected_self): def assertAttrFwd(instance): self.assertIs(instance, expected_self, "Mismatch in property '%s'" % (attrnm,)) raise AttrSuccess return property(assertAttrFwd) self.env.__class__ = EnvironmentAttrSandbox try: for attrnm in 'system_info_providers secure_cookies ' \ 'project_admin_trac_url get_system_info get_version ' \ 'get_templates_dir get_templates_dir get_log_dir ' \ 'backup'.split(): original = getattr(Environment, attrnm) if isinstance(original, MethodType): translation = getattr(self.product_env, attrnm) self.assertIs(translation.im_self, self.env, "'%s' not bound to global env in product env" % (attrnm,)) self.assertIs(translation.im_func, original.im_func, "'%s' function differs in product env" % (attrnm,)) elif isinstance(original, (property, Option)): # Intercept property access e.g. properties, Option, ... 
setattr(self.env.__class__, attrnm, property_mock(attrnm, self.env)) self.exceptFailureMessage = 'Property %(attrnm)s' with self.assertRaises(AttrSuccess) as cm_test_attr: getattr(self.product_env, attrnm) else: self.fail("Environment member %s has unexpected type" % (repr(original),)) finally: self.env.__class__ = EnvironmentStub for attrnm in 'component_activated _component_rules ' \ 'enable_component get_known_users get_repository ' \ '_component_name'.split(): original = getattr(Environment, attrnm) if isinstance(original, MethodType): translation = getattr(self.product_env, attrnm) self.assertIs(translation.im_self, self.product_env, "'%s' not bound to product env" % (attrnm,)) self.assertIs(translation.im_func, original.im_func, "'%s' function differs in product env" % (attrnm,)) elif isinstance(original, property): translation = getattr(ProductEnvironment, attrnm) self.assertIs(original, translation, "'%s' property differs in product env" % (attrnm,)) def test_typecheck(self): """Testing env.__init__""" self._load_product_from_data(self.env, 'tp2') with self.assertRaises(TypeError) as cm_test: new_env = ProductEnvironment(self.product_env, 'tp2') msg = str(cm_test.exception) expected_msg = "Initializer must be called with " \ "trac.env.Environment instance as first argument " \ "(got multiproduct.env.ProductEnvironment instance " \ "instead)" self.assertEqual(msg, expected_msg) def test_component_enable(self): """Testing env.is_component_enabled""" class C(Component): pass # Let's pretend this was declared elsewhere C.__module__ = 'dummy_module' global_env = self.env product_env = self.product_env def _test_component_enabled(cls): cname = global_env._component_name(cls) disable_component_in_config = global_env.disable_component_in_config enable_component_in_config = global_env.enable_component_in_config # cls initially disabled in both envs disable_component_in_config(global_env, cls) disable_component_in_config(product_env, cls) expected_rules = { 
'multiproduct': True, 'trac': True, 'trac.db': True, cname: False, } self.assertEquals(expected_rules, global_env._component_rules) self.assertEquals(expected_rules, product_env._component_rules) self.assertFalse(global_env.is_component_enabled(cls)) self.assertFalse(product_env.is_component_enabled_local(cls)) self.assertIs(global_env[cls], None) self.assertIs(product_env[cls], None) # cls enabled in product env but not in global env
'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_setrlimit(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'setrlimit') def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_setrlimit64(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'setrlimit64') def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_setsid(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, "setsid") def simulate(self, iaddr: str, simstate: "SimulationState") -> str: simstate.set_register(iaddr, "v0", SV.simOne) return self.add_logmsg(iaddr, simstate, "", returnval="1") class MIPStub_setsockopt(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'setsockopt') def is_network_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') a2 = self.get_arg_val(iaddr, simstate, 'a2') a3 = self.get_arg_val(iaddr, simstate, 'a3') a4 = self.get_stack_arg_val(iaddr, simstate, 4) pargs = ','.join(str(a) for a in [a0, a1, a2, a3, a4]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_shmat(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, "shmat") def is_sharedmem_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 
"a0") # int shmid a1 = self.get_arg_val(iaddr, simstate, "a1") # const void *shmaddr a2 = self.get_arg_val(iaddr, simstate, "a2") # int shmflg pargs = ','.join(str(a) for a in [a0, a1, a2]) if ( a0.is_defined and a1.is_defined and a2.is_defined and a0.is_literal and a1.is_literal and a2.is_literal): addrval = a1.literal_value shmid = a0.literal_value memname = "shared:" + str(shmid) if addrval == 0: addr = cast(SSV.SimGlobalAddress, SSV.nullpointer) else: addr = SSV.mk_global_address(addrval, memname) result = simstate.simsupport.sharedmem_shmat( iaddr, simstate, a0.literal_value, addr, a2.literal_value) else: result = SSV.mk_undefined_global_address(memname) if result.is_defined and shmid in simstate.sharedmem: simstate.sharedmem[shmid].set_baseoffset(result.literal_value) else: simstate.add_logmsg( "warning", iaddr + ": attaching shared memory for shmid=" + str(a0.literal_value) + " failure") simstate.set_register(iaddr, "v0", result) return self.add_logmsg(iaddr, simstate, pargs, returnval=str(result)) class MIPStub_shmctl(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, "shmctl") def is_sharedmem_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, "a0") # int shmid a1 = self.get_arg_val(iaddr, simstate, "a1") # int cmd a2 = self.get_arg_val(iaddr, simstate, "a2") # struct shmid_ds *buf pargs = ",".join(str(a) for a in [a0, a1, a2]) if ( a0.is_defined and a1.is_defined and a2.is_defined and a0.is_literal and a1.is_literal and a2.is_address): a2 = cast(SSV.SimAddress, a2) returnval = simstate.simsupport.sharedmem_shmctl( iaddr, simstate, a0.literal_value, a1.literal_value, a2) else: returnval = -1 simstate.set_register(iaddr, "v0", SV.mk_simvalue(returnval)) return self.add_logmsg(iaddr, simstate, pargs, returnval=str(returnval)) class MIPStub_shmdt(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, "shmdt") def 
is_sharedmem_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, "a0") # const void *shmaddr if a0.is_defined and a0.is_global_address: a0 = cast(SSV.SimGlobalAddress, a0) returnval = simstate.simsupport.sharedmem_shmdt( iaddr, simstate, a0) else: returnval = -1 simstate.set_register(iaddr, "v0", SV.mk_simvalue(returnval)) return self.add_logmsg(iaddr, simstate, str(a0), returnval=str(returnval)) class MIPStub_shmget(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'shmget') self.counter = 0 def is_sharedmem_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, "a0") # key_t key a1 = self.get_arg_val(iaddr, simstate, "a1") # size_t size a2 = self.get_arg_val(iaddr, simstate, "a2") # int shmflg pargs = ",".join(str(a) for a in [a0, a1, a2]) if ( a0.is_defined and a1.is_defined and a2.is_defined and a0.is_literal and a1.is_literal and a2.is_literal): shmid = simstate.simsupport.sharedmem_shmget( iaddr, simstate, a0.literal_value, a1.literal_value, a2.literal_value) if shmid in simstate.sharedmem: simstate.add_logmsg( "warning", iaddr + ": Shared memory already exists for shmid=" + str(shmid)) else: simstate.sharedmem[shmid] = SimSharedMemory( simstate, shmid, hex(a0.literal_value), a1.literal_value) else: shmid = -1 simstate.add_logmsg( "warning", iaddr + ": Attempt to gain access to shared memory failed") simstate.set_register(iaddr, "v0", SV.mk_simvalue(shmid)) return self.add_logmsg(iaddr, simstate, pargs, returnval=str(shmid)) class MIPStub_sigaction(MIPSimStub): def __init__(self, name: str = 'sigaction') -> None: MIPSimStub.__init__(self, name) def is_io_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') a2 = 
self.get_arg_val(iaddr, simstate, 'a2') pargs = ','.join(str(a) for a in [a0, a1, a2]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_sigaddset(MIPSimStub): def __init__(self, name: str = 'sigaddset'): MIPSimStub.__init__(self, name) def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = '.'.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_sigemptyset(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'sigemptyset') def is_io_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_signal(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'signal') def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = str(a0) + ',' + str(a1) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_sigprocmask(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'sigprocmask') def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') a2 = self.get_arg_val(iaddr, simstate, 'a2') pargs = ','.join(str(a) for a in [a0, a1, a2]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_sleep(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'sleep') def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) 
return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_socket(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'socket') def is_network_operation(self) -> bool: return True def is_io_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: """Returns a symbolic value in v0.""" a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') a2 = self.get_arg_val(iaddr, simstate, 'a2') # returnval = SSV.mk_symbol('socket-fd',minval=0) returnval = SV.mk_simvalue(113) # notable, recognizable value simstate.set_register(iaddr, 'v0', returnval) pargs = ','.join(str(a) for a in [a0, a1, a2]) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_pthread_cond_init(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_cond_init') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_pthread_cond_signal(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_cond_signal') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_pthread_create(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_create') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') a2 = self.get_arg_val(iaddr, simstate, 'a2') a3 = self.get_arg_val(iaddr, simstate, 
'a3') pargs = ','.join(str(a) for a in [a0, a1, a2, a3]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_pthread_attr_init(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_attr_init') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_pthread_attr_setschedparam(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_attr_setschedparam') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_pthread_attr_setschedpolicy(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_attr_setschedpolicy') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class MIPStub_pthread_mutex_init(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_mutex_init') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') a1 = self.get_arg_val(iaddr, simstate, 'a1') pargs = ','.join(str(a) for a in [a0, a1]) simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, pargs) class 
MIPStub_pthread_mutex_lock(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_mutex_lock') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_pthread_mutex_unlock(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_mutex_unlock') def is_thread_operation(self) -> bool: return True def simulate(self, iaddr: str, simstate: "SimulationState") -> str: a0 = self.get_arg_val(iaddr, simstate, 'a0') simstate.set_register(iaddr, 'v0', SV.simZero) return self.add_logmsg(iaddr, simstate, str(a0)) class MIPStub_pthread_self(MIPSimStub): def __init__(self) -> None: MIPSimStub.__init__(self, 'pthread_self')
a merge or pull request operation. """ startLine: Optional[LineNumber] = None endLine: Optional[LineNumber] = None hunkContent: Optional[HunkContent] = None class MergeHunk(BaseModel): """ Information about merge hunks in a merge or pull request operation. """ isConflict: Optional[IsHunkConflict] = None source: Optional[MergeHunkDetail] = None destination: Optional[MergeHunkDetail] = None base: Optional[MergeHunkDetail] = None class MergeMetadata(BaseModel): """ Returns information about a merge or potential merge between a source reference and a destination reference in a pull request. """ isMerged: Optional[IsMerged] = None mergedBy: Optional[Arn] = None mergeCommitId: Optional[CommitId] = None mergeOption: Optional[MergeOptionTypeEnum] = None class ObjectTypeEnum(Enum): FILE = 'FILE' DIRECTORY = 'DIRECTORY' GIT_LINK = 'GIT_LINK' SYMBOLIC_LINK = 'SYMBOLIC_LINK' class PullRequestCreatedEventMetadata(BaseModel): """ Metadata about the pull request that is used when comparing the pull request source with its destination. """ repositoryName: Optional[RepositoryName] = None sourceCommitId: Optional[CommitId] = None destinationCommitId: Optional[CommitId] = None mergeBase: Optional[CommitId] = None class PullRequestStatusChangedEventMetadata(BaseModel): """ Information about a change to the status of a pull request. """ pullRequestStatus: Optional[PullRequestStatusEnum] = None class PullRequestSourceReferenceUpdatedEventMetadata(BaseModel): """ Information about an update to the source branch of a pull request. """ repositoryName: Optional[RepositoryName] = None beforeCommitId: Optional[CommitId] = None afterCommitId: Optional[CommitId] = None mergeBase: Optional[CommitId] = None class ReferenceName(AccountId): pass class PullRequestTarget(BaseModel): """ Returns information about a pull request target. 
""" repositoryName: Optional[RepositoryName] = None sourceReference: Optional[ReferenceName] = None destinationReference: Optional[ReferenceName] = None destinationCommit: Optional[CommitId] = None sourceCommit: Optional[CommitId] = None mergeBase: Optional[CommitId] = None mergeMetadata: Optional[MergeMetadata] = None class SourceFileSpecifier(BaseModel): """ Information about a source file that is part of changes made in a commit. """ filePath: Path isMove: Optional[IsMove] = None class ReactionEmoji(AccountId): pass class ReactionUsersList(BaseModel): __root__: List[Arn] class ReactionShortCode(AccountId): pass class ReactionUnicode(AccountId): pass class ReplacementTypeEnum(Enum): KEEP_BASE = 'KEEP_BASE' KEEP_SOURCE = 'KEEP_SOURCE' KEEP_DESTINATION = 'KEEP_DESTINATION' USE_NEW_CONTENT = 'USE_NEW_CONTENT' class RepositoryNameIdPair(BaseModel): """ Information about a repository name and ID. """ repositoryName: Optional[RepositoryName] = None repositoryId: Optional[RepositoryId] = None class RepositoryTriggerName(AccountId): pass class RepositoryTriggerCustomData(AccountId): pass class RepositoryTriggerEventEnum(Enum): all = 'all' updateReference = 'updateReference' createReference = 'createReference' deleteReference = 'deleteReference' class RepositoryTriggerExecutionFailureMessage(AccountId): pass class RepositoryTriggerExecutionFailure(BaseModel): """ A trigger failed to run. """ trigger: Optional[RepositoryTriggerName] = None failureMessage: Optional[RepositoryTriggerExecutionFailureMessage] = None class RepositoryTriggerExecutionFailureList(BaseModel): __root__: List[RepositoryTriggerExecutionFailure] class RepositoryTriggerNameList(BaseModel): __root__: List[RepositoryTriggerName] class SetFileModeEntry(BaseModel): """ Information about the file mode changes. """ filePath: Path fileMode: FileModeTypeEnum class SubModule(BaseModel): """ Returns information about a submodule reference in a repository folder. 
""" commitId: Optional[ObjectId] = None absolutePath: Optional[Path] = None relativePath: Optional[Path] = None class SymbolicLink(File): """ Returns information about a symbolic link in a repository folder. """ pass class TagKey(BaseModel): __root__: Annotated[str, Field(max_length=128, min_length=1)] class TagKeysList(BaseModel): __root__: List[TagKey] class TagValue(BaseModel): __root__: Annotated[str, Field(max_length=256, min_length=0)] class Target(BaseModel): """ Returns information about a target for a pull request. """ repositoryName: RepositoryName sourceReference: ReferenceName destinationReference: Optional[ReferenceName] = None class AssociateApprovalRuleTemplateWithRepositoryInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName repositoryName: RepositoryName class BatchAssociateApprovalRuleTemplateWithRepositoriesOutput(BaseModel): associatedRepositoryNames: RepositoryNameList errors: BatchAssociateApprovalRuleTemplateWithRepositoriesErrorsList class BatchAssociateApprovalRuleTemplateWithRepositoriesInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName repositoryNames: RepositoryNameList class BatchDescribeMergeConflictsInput(BaseModel): repositoryName: RepositoryName destinationCommitSpecifier: CommitName sourceCommitSpecifier: CommitName mergeOption: MergeOptionTypeEnum maxMergeHunks: Optional[MaxResults] = None maxConflictFiles: Optional[MaxResults] = None filePaths: Optional[FilePaths] = None conflictDetailLevel: Optional[ConflictDetailLevelTypeEnum] = None conflictResolutionStrategy: Optional[ConflictResolutionStrategyTypeEnum] = None nextToken: Optional[NextToken] = None class BatchDisassociateApprovalRuleTemplateFromRepositoriesOutput(BaseModel): disassociatedRepositoryNames: RepositoryNameList errors: BatchDisassociateApprovalRuleTemplateFromRepositoriesErrorsList class BatchDisassociateApprovalRuleTemplateFromRepositoriesInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName repositoryNames: 
RepositoryNameList class BatchGetCommitsInput(BaseModel): commitIds: CommitIdsInputList repositoryName: RepositoryName class BatchGetRepositoriesInput(BaseModel): """ Represents the input of a batch get repositories operation. """ repositoryNames: RepositoryNameList class CreateApprovalRuleTemplateOutput(BaseModel): approvalRuleTemplate: ApprovalRuleTemplate class CreateApprovalRuleTemplateInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName approvalRuleTemplateContent: ApprovalRuleTemplateContent approvalRuleTemplateDescription: Optional[ApprovalRuleTemplateDescription] = None class CreateBranchInput(BaseModel): """ Represents the input of a create branch operation. """ repositoryName: RepositoryName branchName: BranchName commitId: CommitId class CreatePullRequestApprovalRuleInput(BaseModel): pullRequestId: PullRequestId approvalRuleName: ApprovalRuleName approvalRuleContent: ApprovalRuleContent class CreateRepositoryInput(BaseModel): """ Represents the input of a create repository operation. """ repositoryName: RepositoryName repositoryDescription: Optional[RepositoryDescription] = None tags: Optional[TagsMap] = None class CreateUnreferencedMergeCommitOutput(BaseModel): commitId: Optional[ObjectId] = None treeId: Optional[ObjectId] = None class DeleteApprovalRuleTemplateOutput(BaseModel): approvalRuleTemplateId: ApprovalRuleTemplateId class DeleteApprovalRuleTemplateInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName class DeleteBranchOutput(BaseModel): """ Represents the output of a delete branch operation. """ deletedBranch: Optional[BranchInfo] = None class DeleteBranchInput(BaseModel): """ Represents the input of a delete branch operation. 
""" repositoryName: RepositoryName branchName: BranchName class DeleteCommentContentOutput(BaseModel): comment: Optional[Comment] = None class DeleteCommentContentInput(BaseModel): commentId: CommentId class DeleteFileOutput(BaseModel): commitId: ObjectId blobId: ObjectId treeId: ObjectId filePath: Path class DeleteFileInput(BaseModel): repositoryName: RepositoryName branchName: BranchName filePath: Path parentCommitId: CommitId keepEmptyFolders: Optional[KeepEmptyFolders] = None commitMessage: Optional[Message] = None name: Optional[Name] = None email: Optional[Email] = None class DeletePullRequestApprovalRuleOutput(BaseModel): approvalRuleId: ApprovalRuleId class DeletePullRequestApprovalRuleInput(BaseModel): pullRequestId: PullRequestId approvalRuleName: ApprovalRuleName class DeleteRepositoryOutput(BaseModel): """ Represents the output of a delete repository operation. """ repositoryId: Optional[RepositoryId] = None class DeleteRepositoryInput(BaseModel): """ Represents the input of a delete repository operation. 
""" repositoryName: RepositoryName class DescribeMergeConflictsInput(BaseModel): repositoryName: RepositoryName destinationCommitSpecifier: CommitName sourceCommitSpecifier: CommitName mergeOption: MergeOptionTypeEnum maxMergeHunks: Optional[MaxResults] = None filePath: Path conflictDetailLevel: Optional[ConflictDetailLevelTypeEnum] = None conflictResolutionStrategy: Optional[ConflictResolutionStrategyTypeEnum] = None nextToken: Optional[NextToken] = None class DescribePullRequestEventsInput(BaseModel): pullRequestId: PullRequestId pullRequestEventType: Optional[PullRequestEventType] = None actorArn: Optional[Arn] = None nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class DisassociateApprovalRuleTemplateFromRepositoryInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName repositoryName: RepositoryName class EvaluatePullRequestApprovalRulesInput(BaseModel): pullRequestId: PullRequestId revisionId: RevisionId class GetApprovalRuleTemplateOutput(CreateApprovalRuleTemplateOutput): pass class GetApprovalRuleTemplateInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName class GetBlobOutput(BaseModel): """ Represents the output of a get blob operation. """ content: Blob class GetBlobInput(BaseModel): """ Represents the input of a get blob operation. """ repositoryName: RepositoryName blobId: ObjectId class GetBranchOutput(BaseModel): """ Represents the output of a get branch operation. """ branch: Optional[BranchInfo] = None class GetBranchInput(BaseModel): """ Represents the input of a get branch operation. 
""" repositoryName: Optional[RepositoryName] = None branchName: Optional[BranchName] = None class GetCommentOutput(DeleteCommentContentOutput): pass class GetCommentInput(BaseModel): commentId: CommentId class GetCommentReactionsInput(BaseModel): commentId: CommentId reactionUserArn: Optional[Arn] = None nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class GetCommentsForComparedCommitInput(BaseModel): repositoryName: RepositoryName beforeCommitId: Optional[CommitId] = None afterCommitId: CommitId nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class GetCommentsForPullRequestInput(BaseModel): pullRequestId: PullRequestId repositoryName: Optional[RepositoryName] = None beforeCommitId: Optional[CommitId] = None afterCommitId: Optional[CommitId] = None nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class GetCommitInput(BaseModel): """ Represents the input of a get commit operation. """ repositoryName: RepositoryName commitId: ObjectId class GetDifferencesOutput(BaseModel): differences: Optional[DifferenceList] = None NextToken: Optional[NextToken] = None class GetDifferencesInput(BaseModel): repositoryName: RepositoryName beforeCommitSpecifier: Optional[CommitName] = None afterCommitSpecifier: CommitName beforePath: Optional[Path] = None afterPath: Optional[Path] = None MaxResults: Optional[Limit] = None NextToken: Optional[NextToken] = None class GetFileOutput(BaseModel): commitId: ObjectId blobId: ObjectId filePath: Path fileMode: FileModeTypeEnum fileSize: ObjectSize fileContent: FileContent class GetFileInput(BaseModel): repositoryName: RepositoryName commitSpecifier: Optional[CommitName] = None filePath: Path class GetFolderInput(BaseModel): repositoryName: RepositoryName commitSpecifier: Optional[CommitName] = None folderPath: Path class GetMergeCommitOutput(BaseModel): sourceCommitId: Optional[ObjectId] = None destinationCommitId: Optional[ObjectId] = None baseCommitId: 
Optional[ObjectId] = None mergedCommitId: Optional[ObjectId] = None class GetMergeCommitInput(BaseModel): repositoryName: RepositoryName sourceCommitSpecifier: CommitName destinationCommitSpecifier: CommitName conflictDetailLevel: Optional[ConflictDetailLevelTypeEnum] = None conflictResolutionStrategy: Optional[ConflictResolutionStrategyTypeEnum] = None class GetMergeConflictsInput(BaseModel): repositoryName: RepositoryName destinationCommitSpecifier: CommitName sourceCommitSpecifier: CommitName mergeOption: MergeOptionTypeEnum conflictDetailLevel: Optional[ConflictDetailLevelTypeEnum] = None maxConflictFiles: Optional[MaxResults] = None conflictResolutionStrategy: Optional[ConflictResolutionStrategyTypeEnum] = None nextToken: Optional[NextToken] = None class GetMergeOptionsOutput(BaseModel): mergeOptions: MergeOptions sourceCommitId: ObjectId destinationCommitId: ObjectId baseCommitId: ObjectId class GetMergeOptionsInput(BaseModel): repositoryName: RepositoryName sourceCommitSpecifier: CommitName destinationCommitSpecifier: CommitName conflictDetailLevel: Optional[ConflictDetailLevelTypeEnum] = None conflictResolutionStrategy: Optional[ConflictResolutionStrategyTypeEnum] = None class GetPullRequestInput(BaseModel): pullRequestId: PullRequestId class GetPullRequestApprovalStatesOutput(BaseModel): approvals: Optional[ApprovalList] = None class GetPullRequestApprovalStatesInput(BaseModel): pullRequestId: PullRequestId revisionId: RevisionId class GetPullRequestOverrideStateOutput(BaseModel): overridden: Optional[Overridden] = None overrider: Optional[Arn] = None class GetPullRequestOverrideStateInput(BaseModel): pullRequestId: PullRequestId revisionId: RevisionId class GetRepositoryInput(BaseModel): """ Represents the input of a get repository operation. """ repositoryName: RepositoryName class GetRepositoryTriggersInput(BaseModel): """ Represents the input of a get repository triggers operation. 
""" repositoryName: RepositoryName class ListApprovalRuleTemplatesOutput(BaseModel): approvalRuleTemplateNames: Optional[ApprovalRuleTemplateNameList] = None nextToken: Optional[NextToken] = None class ListApprovalRuleTemplatesInput(BaseModel): nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class ListAssociatedApprovalRuleTemplatesForRepositoryOutput( ListApprovalRuleTemplatesOutput ): pass class ListAssociatedApprovalRuleTemplatesForRepositoryInput(BaseModel): repositoryName: RepositoryName nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class ListBranchesOutput(BaseModel): """ Represents the output of a list branches operation. """ branches: Optional[BranchNameList] = None nextToken: Optional[NextToken] = None class ListBranchesInput(BaseModel): """ Represents the input of a list branches operation. """ repositoryName: RepositoryName nextToken: Optional[NextToken] = None class ListPullRequestsOutput(BaseModel): pullRequestIds: PullRequestIdList nextToken: Optional[NextToken] = None class ListPullRequestsInput(BaseModel): repositoryName: RepositoryName authorArn: Optional[Arn] = None pullRequestStatus: Optional[PullRequestStatusEnum] = None nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class ListRepositoriesInput(BaseModel): """ Represents the input of a list repositories operation. 
""" nextToken: Optional[NextToken] = None sortBy: Optional[SortByEnum] = None order: Optional[OrderEnum] = None class ListRepositoriesForApprovalRuleTemplateOutput(BaseModel): repositoryNames: Optional[RepositoryNameList] = None nextToken: Optional[NextToken] = None class ListRepositoriesForApprovalRuleTemplateInput(BaseModel): approvalRuleTemplateName: ApprovalRuleTemplateName nextToken: Optional[NextToken] = None maxResults: Optional[MaxResults] = None class ListTagsForResourceOutput(BaseModel): tags: Optional[TagsMap] = None nextToken: Optional[NextToken] = None class ListTagsForResourceInput(BaseModel): resourceArn: ResourceArn nextToken: Optional[NextToken] = None class MergeBranchesByFastForwardOutput(CreateUnreferencedMergeCommitOutput): pass class MergeBranchesByFastForwardInput(BaseModel): repositoryName: RepositoryName sourceCommitSpecifier: CommitName destinationCommitSpecifier: CommitName targetBranch: Optional[BranchName] = None class MergeBranchesBySquashOutput(CreateUnreferencedMergeCommitOutput): pass class MergeBranchesByThreeWayOutput(CreateUnreferencedMergeCommitOutput): pass class MergePullRequestByFastForwardInput(BaseModel): pullRequestId: PullRequestId repositoryName: RepositoryName sourceCommitId: Optional[ObjectId] = None class OverridePullRequestApprovalRulesInput(BaseModel): pullRequestId: PullRequestId revisionId: RevisionId overrideStatus: OverrideStatus class PostCommentReplyOutput(DeleteCommentContentOutput): pass class PostCommentReplyInput(BaseModel): inReplyTo: CommentId clientRequestToken: Optional[ClientRequestToken] = None content: Content class PutCommentReactionInput(BaseModel): commentId: CommentId reactionValue: ReactionValue class PutFileOutput(BaseModel): commitId: ObjectId blobId: ObjectId treeId: ObjectId class PutFileInput(BaseModel):
<reponame>anthem-ai/fhir-types from typing import Any, List, Literal, TypedDict from .FHIR_Attachment import FHIR_Attachment from .FHIR_code import FHIR_code from .FHIR_CodeableConcept import FHIR_CodeableConcept from .FHIR_Contract_ContentDefinition import FHIR_Contract_ContentDefinition from .FHIR_Contract_Friendly import FHIR_Contract_Friendly from .FHIR_Contract_Legal import FHIR_Contract_Legal from .FHIR_Contract_Rule import FHIR_Contract_Rule from .FHIR_Contract_Signer import FHIR_Contract_Signer from .FHIR_Contract_Term import FHIR_Contract_Term from .FHIR_dateTime import FHIR_dateTime from .FHIR_Element import FHIR_Element from .FHIR_id import FHIR_id from .FHIR_Identifier import FHIR_Identifier from .FHIR_Meta import FHIR_Meta from .FHIR_Narrative import FHIR_Narrative from .FHIR_Period import FHIR_Period from .FHIR_Reference import FHIR_Reference from .FHIR_string import FHIR_string from .FHIR_uri import FHIR_uri # Legally enforceable, formally recorded unilateral or bilateral directive i.e., a policy or agreement. FHIR_Contract = TypedDict( "FHIR_Contract", { # This is a Contract resource "resourceType": Literal["Contract"], # The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. "id": FHIR_id, # The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. "meta": FHIR_Meta, # A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. "implicitRules": FHIR_uri, # Extensions for implicitRules "_implicitRules": FHIR_Element, # The base language in which the resource is written. 
"language": FHIR_code, # Extensions for language "_language": FHIR_Element, # A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. "text": FHIR_Narrative, # These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. "contained": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. "extension": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). 
"modifierExtension": List[Any], # Unique identifier for this Contract or a derivative that references a Source Contract. "identifier": List[FHIR_Identifier], # Canonical identifier for this contract, represented as a URI (globally unique). "url": FHIR_uri, # Extensions for url "_url": FHIR_Element, # An edition identifier used for business purposes to label business significant variants. "version": FHIR_string, # Extensions for version "_version": FHIR_Element, # The status of the resource instance. "status": FHIR_code, # Extensions for status "_status": FHIR_Element, # Legal states of the formation of a legal instrument, which is a formally executed written document that can be formally attributed to its author, records and formally expresses a legally enforceable act, process, or contractual duty, obligation, or right, and therefore evidences that act, process, or agreement. "legalState": FHIR_CodeableConcept, # The URL pointing to a FHIR-defined Contract Definition that is adhered to in whole or part by this Contract. "instantiatesCanonical": FHIR_Reference, # The URL pointing to an externally maintained definition that is adhered to in whole or in part by this Contract. "instantiatesUri": FHIR_uri, # Extensions for instantiatesUri "_instantiatesUri": FHIR_Element, # The minimal content derived from the basal information source at a specific stage in its lifecycle. "contentDerivative": FHIR_CodeableConcept, # When this Contract was issued. "issued": FHIR_dateTime, # Extensions for issued "_issued": FHIR_Element, # Relevant time or time-period when this Contract is applicable. "applies": FHIR_Period, # Event resulting in discontinuation or termination of this Contract instance by one or more parties to the contract. "expirationType": FHIR_CodeableConcept, # The target entity impacted by or of interest to parties to the agreement. 
"subject": List[FHIR_Reference], # A formally or informally recognized grouping of people, principals, organizations, or jurisdictions formed for the purpose of achieving some form of collective action such as the promulgation, administration and enforcement of contracts and policies. "authority": List[FHIR_Reference], # Recognized governance framework or system operating with a circumscribed scope in accordance with specified principles, policies, processes or procedures for managing rights, actions, or behaviors of parties or principals relative to resources. "domain": List[FHIR_Reference], # Sites in which the contract is complied with, exercised, or in force. "site": List[FHIR_Reference], # A natural language name identifying this Contract definition, derivative, or instance in any legal state. Provides additional information about its content. This name should be usable as an identifier for the module by machine processing applications such as code generation. "name": FHIR_string, # Extensions for name "_name": FHIR_Element, # A short, descriptive, user-friendly title for this Contract definition, derivative, or instance in any legal state.t giving additional information about its content. "title": FHIR_string, # Extensions for title "_title": FHIR_Element, # An explanatory or alternate user-friendly title for this Contract definition, derivative, or instance in any legal state.t giving additional information about its content. "subtitle": FHIR_string, # Extensions for subtitle "_subtitle": FHIR_Element, # Alternative representation of the title for this Contract definition, derivative, or instance in any legal state., e.g., a domain specific contract number related to legislation. "alias": List[FHIR_string], # Extensions for alias "_alias": List[FHIR_Element], # The individual or organization that authored the Contract definition, derivative, or instance in any legal state. 
"author": FHIR_Reference, # A selector of legal concerns for this Contract definition, derivative, or instance in any legal state. "scope": FHIR_CodeableConcept, # Narrows the range of legal concerns to focus on the achievement of specific contractual objectives. "topicCodeableConcept": FHIR_CodeableConcept, # Narrows the range of legal concerns to focus on the achievement of specific contractual objectives. "topicReference": FHIR_Reference, # A high-level category for the legal instrument, whether constructed as a Contract definition, derivative, or instance in any legal state. Provides additional information about its content within the context of the Contract's scope to distinguish the kinds of systems that would be interested in the contract. "type": FHIR_CodeableConcept, # Sub-category for the Contract that distinguishes the kinds of systems that would be interested in the Contract within the context of the Contract's scope. "subType": List[FHIR_CodeableConcept], # Precusory content developed with a focus and intent of supporting the formation a Contract instance, which may be associated with and transformable into a Contract. "contentDefinition": FHIR_Contract_ContentDefinition, # One or more Contract Provisions, which may be related and conveyed as a group, and may contain nested groups. "term": List[FHIR_Contract_Term], # Information that may be needed by/relevant to the performer in their execution of this term action. "supportingInfo": List[FHIR_Reference], # Links to Provenance records for past versions of this Contract definition, derivative, or instance, which identify key state transitions or updates that are likely to be relevant to a user looking at the current version of the Contract. The Provence.entity indicates the target that was changed in the update. http://build.fhir.org/provenance-definitions.html#Provenance.entity. "relevantHistory": List[FHIR_Reference],
types = self.contact_types[i] for j in range(len(types)): nb = self.contact_ids[i][j] # Specific selection function if ((plates_pairs == 'all') or ('('+str(i)+','+str(nb)+')' == plates_pairs) or ('('+str(i)+','+str(nb)+')' in plates_pairs) or ('('+str(nb)+','+str(i)+')' == plates_pairs) or ('('+str(nb)+','+str(i)+')' in plates_pairs)): i_want_a_tenon = True else: i_want_a_tenon = False # For all specified Edgewise connection if (types[j] in 'SES') and (nb > i) and i_want_a_tenon is True: # Prerequisite if tenon_number < 1 : raise Exception('tenon_number must be greater than 1') if tenon_width <= 0 : raise Exception('tenon_width must be greater than 0') if tenon_spacing <= 0 : raise Exception('tenon_spacing must be greater than 0') #deal with male/female nb = self.contact_ids[i][j] if types[j] == 'SE': spread_angle=-spread_angle male, female = i, nb else: male, female = nb, i #compute plane angles angles = [] if parallel_tenons is True: if tenon_number == 1: angles = [0,0] else: for k in range(tenon_number): angles.append(- spread_angle + 2*k*spread_angle/(tenon_number-1)) angles.append(- spread_angle + 2*k*spread_angle/(tenon_number-1)) else: for k in range(2*tenon_number): angles.append(- spread_angle + 2*k*(spread_angle/(2*tenon_number-1))) #tenon locations cp = self.contact_planes[i][j] if tenon_number > 1 : dist = (float(tenon_number-1) /2) * (tenon_width + tenon_spacing) pointA = rs.CopyObject(cp.Origin, cp.XAxis * dist) pointB = rs.CopyObject(cp.Origin, -cp.XAxis * dist) line = rs.AddLine(pointA, pointB) shifted_line = rs.CopyObject(line, cp.XAxis * tenon_shift) location = rs.DivideCurve(shifted_line, tenon_number-1) else: location = [rs.CopyObject(cp.Origin, cp.XAxis * tenon_shift)] #get insertion vector vec = self.contact_vectors[i][j] if custom_insertion != None: vec=custom_insertion #get and reorder top/bottom tpf = self.plates[female].top_plane bpf = self.plates[female].bottom_plane if rs.Distance(tpf.Origin, cp.Origin) < rs.Distance(bpf.Origin, 
cp.Origin): self.switch_top_bottom(plates=[female]) tpm = self.plates[male].top_plane bpm = self.plates[male].bottom_plane tcf = self.plates[female].top_center bcf = self.plates[female].bottom_center if rs.Distance(tpm.Origin, bcf) < rs.Distance(bpm.Origin, bcf): self.switch_top_bottom(plates=[male]) tpm = self.plates[male].top_plane bpm = self.plates[male].bottom_plane #create tenons m_poly_top=[] m_poly_bottom=[] f_poly_top=[] f_poly_bottom=[] for k in range(tenon_number): #plane_location rot_vec_1 = rs.VectorRotate(cp.YAxis, angles[2*k], cp.ZAxis) rot_vec_2 = rs.VectorRotate(cp.YAxis, angles[2*k+1], cp.ZAxis) loc1= rs.CopyObject(location[k], cp.XAxis * tenon_width/2) loc2= rs.CopyObject(location[k], cp.XAxis * -tenon_width/2) pl1 = rs.PlaneFromFrame(loc1,vec,rot_vec_1) pl2 = rs.PlaneFromFrame(loc2,vec,rot_vec_2) if rs.IsVectorParallelTo(cp.YAxis, vec) !=0: pl1 = rs.PlaneFromFrame(loc1,vec,cp.ZAxis) pl2 = rs.PlaneFromFrame(loc2,vec,cp.ZAxis) #solid creation solid = rs.coercebrep(Toolbox.Breps.box_from_6_planes([pl1,pl2],[tpm,bpm],[tpf,bpf])) if solid.SolidOrientation == rg.BrepSolidOrientation.Inward: rg.Brep.Flip(solid) self.plates[male].joints_positives.append(copy.deepcopy(solid)) self.plates[female].joints_negatives.append(copy.deepcopy(solid)) #contour creation m_poly_top.append(Toolbox.Planes.three_planes_intersection(bpf,tpm,pl1)) m_poly_top.append(Toolbox.Planes.three_planes_intersection(tpf,tpm,pl1)) m_poly_top.append(Toolbox.Planes.three_planes_intersection(tpf,tpm,pl2)) m_poly_top.append(Toolbox.Planes.three_planes_intersection(bpf,tpm,pl2)) m_poly_bottom.append(Toolbox.Planes.three_planes_intersection(bpf,bpm,pl1)) m_poly_bottom.append(Toolbox.Planes.three_planes_intersection(tpf,bpm,pl1)) m_poly_bottom.append(Toolbox.Planes.three_planes_intersection(tpf,bpm,pl2)) m_poly_bottom.append(Toolbox.Planes.three_planes_intersection(bpf,bpm,pl2)) f_poly_top.append(Toolbox.Planes.three_planes_intersection(tpm,tpf,pl1)) 
f_poly_top.append(Toolbox.Planes.three_planes_intersection(bpm,tpf,pl1)) f_poly_top.append(Toolbox.Planes.three_planes_intersection(bpm,tpf,pl2)) f_poly_top.append(Toolbox.Planes.three_planes_intersection(tpm,tpf,pl2)) f_poly_bottom.append(Toolbox.Planes.three_planes_intersection(tpm,bpf,pl1)) f_poly_bottom.append(Toolbox.Planes.three_planes_intersection(bpm,bpf,pl1)) f_poly_bottom.append(Toolbox.Planes.three_planes_intersection(bpm,bpf,pl2)) f_poly_bottom.append(Toolbox.Planes.three_planes_intersection(tpm,bpf,pl2)) self.plates[male].top_contour = Toolbox.Curves.insert_curves(self.plates[male].top_contour, [rs.AddPolyline(m_poly_top)]) self.plates[male].bottom_contour = Toolbox.Curves.insert_curves(self.plates[male].bottom_contour, [rs.AddPolyline(m_poly_bottom)]) self.plates[female].top_contour = Toolbox.Curves.insert_curves(self.plates[female].top_contour, [rs.AddPolyline(f_poly_top)]) self.plates[female].bottom_contour = Toolbox.Curves.insert_curves(self.plates[female].bottom_contour, [rs.AddPolyline(f_poly_bottom)]) # Structural analysis for k in range(len(location)): pm=rs.CurveClosestPoint(self.FEM_plates[male],location[k]) pf=rs.CurveClosestPoint(self.FEM_plates[female],location[k]) self.FEM_plates[male] = scriptcontext.doc.Objects.Add(self.FEM_plates[male]) self.FEM_plates[female] = scriptcontext.doc.Objects.Add(self.FEM_plates[female]) joint_line = rs.AddLine(rs.EvaluateCurve(self.FEM_plates[male],pm), rs.EvaluateCurve(self.FEM_plates[female],pf)) rs.InsertCurveKnot(self.FEM_plates[male],pm) rs.InsertCurveKnot(self.FEM_plates[female],pf) self.FEM_plates[male] = rs.coercecurve(self.FEM_plates[male]) self.FEM_plates[female] = rs.coercecurve(self.FEM_plates[female]) self.FEM_joints.append(rs.coercecurve(joint_line)) @__skip_nones def add_fingers(self, plates_pairs='all', finger_number_1=2.0, finger_length_1='default', finger_width_1=1.0, finger_number_2=2.0, finger_length_2='default', finger_width_2=1.0, finger_spacing=0.0, finger_shift=0.0, mirror=False): 
"""Add finger joints on Side-to-Side contact zones.""" #cast plate_pairs to string if plates_pairs != 'all': for i in range(len(plates_pairs)): plates_pairs[i] = str(plates_pairs[i]) #conditional loop for i in range(self.count): types = self.contact_types[i] for j in range(len(types)): nb = self.contact_ids[i][j] #specific selection function if ((plates_pairs == 'all') or ('('+str(i)+','+str(nb)+')' == plates_pairs) or ('('+str(i)+','+str(nb)+')' in plates_pairs)): i_want_a_finger = True else: i_want_a_finger = False #for all specified Side-to-Side connection if (types[j] == 'SS') and (nb > i) and (i_want_a_finger is True): #prerequisite if finger_length_1 < 0 : raise Exception('finger_length_1 must be greater than 0') if finger_length_2 < 0 : raise Exception('finger_length_2 must be greater than 0') #joint location zone = self.contact_zones[i][j] rectangle = Toolbox.Curves.trapeze_to_rectangle(rs.JoinCurves(rs.DuplicateEdgeCurves(zone))) if Toolbox.Curves.rectangle_dimensions(rectangle)[0] < (finger_width_1*finger_number_1 + finger_width_2*finger_number_2 + 2*finger_spacing*(finger_number_1+finger_number_2-1) + finger_shift*2): excess = (finger_width_1*finger_number_1 + finger_width_2*finger_number_2 + 2*finger_spacing*(finger_number_1+finger_number_2-1) + finger_shift*2) / (Toolbox.Curves.rectangle_dimensions(rectangle)[0]) * 100 raise Exception(' Joint is to large ('+ str(int(excess)) +' %) for contact area between plate '+str(i)+' and plate '+str(nb)) plane_male = self.plates[i].top_plane plane_female = self.plates[nb].top_plane center = self.contact_centers[i][j] joint_plane = rs.PlaneFromNormal(center, self.contact_planes[i][j].YAxis, self.contact_planes[i][j].XAxis) #default length 1 if (finger_length_1 == 'default') or (finger_length_1 == 0) : if abs(rs.IsVectorParallelTo(plane_male.ZAxis, plane_female.ZAxis)) == 0 and rs.IsVectorPerpendicularTo(plane_male.ZAxis, plane_female.ZAxis) is False: alpha = rs.VectorAngle(plane_male.ZAxis, plane_female.ZAxis) 
thickness_female = self.plates[nb].thickness new_finger_length_1 = abs(thickness_female / math.sin(math.radians(180-alpha))) else: new_finger_length_1 = self.plates[nb].thickness else: new_finger_length_1 = finger_length_1 #default length 2 if (finger_length_2 == 'default') or (finger_length_2 == 0) : if abs(rs.IsVectorParallelTo(plane_male.ZAxis, plane_female.ZAxis)) == 0 and rs.IsVectorPerpendicularTo(plane_male.ZAxis, plane_female.ZAxis) is False: alpha = rs.VectorAngle(plane_male.ZAxis, plane_female.ZAxis) thickness_male = self.plates[i].thickness new_finger_length_2 = abs(thickness_male / math.sin(math.radians(180-alpha))) else: new_finger_length_2 = self.plates[i].thickness else: new_finger_length_2 = finger_length_2 #correct length projection if abs(rs.IsVectorParallelTo(plane_male.ZAxis, joint_plane.ZAxis)) == 0: beta = rs.VectorAngle(plane_male.ZAxis, joint_plane.ZAxis) new_finger_length_1 = new_finger_length_1 * abs(math.cos(math.radians(beta))) if abs(rs.IsVectorParallelTo(plane_female.ZAxis, joint_plane.ZAxis)) == 0: beta = rs.VectorAngle(plane_female.ZAxis, joint_plane.ZAxis) new_finger_length_2 = new_finger_length_2*abs(math.cos(math.radians(beta))) #configuration (alternate or centered) if (finger_number_1 + finger_number_2) % 2 == 0: #alternate if mirror is False: center_1 = rs.CopyObject(joint_plane.Origin, joint_plane.XAxis * (finger_spacing + finger_width_2) /2) center_2 = rs.CopyObject(joint_plane.Origin, -joint_plane.XAxis * (finger_spacing + finger_width_1) /2) else: center_1 = rs.CopyObject(joint_plane.Origin, -joint_plane.XAxis * (finger_spacing + finger_width_2) /2) center_2 = rs.CopyObject(joint_plane.Origin, joint_plane.XAxis * (finger_spacing + finger_width_1) /2) else: #centered center_1 = joint_plane.Origin center_2 = joint_plane.Origin #finger location - first side if finger_number_1 > 1 : dist = (float(finger_number_1 -1) /2) * (finger_width_1 + finger_width_2 + 2*finger_spacing) pointA = rs.CopyObject(center_1, joint_plane.XAxis * 
dist) pointB = rs.CopyObject(center_1, -joint_plane.XAxis * dist) line = rs.AddLine(pointA, pointB) shifted_line = rs.CopyObject(line, joint_plane.XAxis * finger_shift) location_1 = rs.DivideCurve(shifted_line, finger_number_1 -1) else: location_1 = [rs.CopyObject(center_1, joint_plane.XAxis * finger_shift)] #finger location - second side if finger_number_2 > 1 : dist = (float(finger_number_2 -1) /2) * (finger_width_1 + finger_width_2 +2*finger_spacing) pointA = rs.CopyObject(center_2, joint_plane.XAxis * dist) pointB = rs.CopyObject(center_2, -joint_plane.XAxis * dist) line = rs.AddLine(pointA, pointB) shifted_line = rs.CopyObject(line, joint_plane.XAxis * finger_shift) location_2 = rs.DivideCurve(shifted_line, finger_number_2 -1) else: location_2 = [rs.CopyObject(center_2, joint_plane.XAxis * finger_shift)] #solid - first side for k in range(len(location_2)): #base polyline point1 = rs.coerce3dpoint(rs.CopyObject(location_2[k], joint_plane.XAxis * finger_width_2/2)) point4 = rs.coerce3dpoint(rs.CopyObject(location_2[k], -joint_plane.XAxis * finger_width_2/2)) point2 = rs.coerce3dpoint(rs.CopyObject(point1, joint_plane.YAxis * new_finger_length_2)) point3 = rs.coerce3dpoint(rs.CopyObject(point4, joint_plane.YAxis * new_finger_length_2)) polyline = [point1, point2, point3, point4, point1] #projection for joint negative proj_top_n = rg.Polyline(copy.deepcopy(polyline)) proj_top_n.Transform(rg.Transform.ProjectAlong(self.plates[i].top_plane, joint_plane.ZAxis)) proj_top_n =proj_top_n.ToArray() proj_bottom_n = rg.Polyline(copy.deepcopy(polyline)) proj_bottom_n.Transform(rg.Transform.ProjectAlong(self.plates[i].bottom_plane, joint_plane.ZAxis)) proj_bottom_n = proj_bottom_n.ToArray() finger_box_n = box = rg.Brep.CreateFromBox(proj_top_n[0:4] + proj_bottom_n[0:4]) self.plates[i].joints_negatives.append(finger_box_n) #projection for joint positive proj_top_p = rg.Polyline(copy.deepcopy(polyline)) proj_top_p.Transform(rg.Transform.ProjectAlong(self.plates[nb].top_plane, 
joint_plane.ZAxis)) proj_top_p =proj_top_p.ToArray() proj_bottom_p = rg.Polyline(copy.deepcopy(polyline)) proj_bottom_p.Transform(rg.Transform.ProjectAlong(self.plates[nb].bottom_plane, joint_plane.ZAxis)) proj_bottom_p = proj_bottom_p.ToArray() finger_box_p = box = rg.Brep.CreateFromBox(proj_top_p[0:4] + proj_bottom_p[0:4]) #if (finger_length_2 == 'default') or (finger_length_2 == 0) : top_plane = rs.coerceplane(self.plates[i].top_plane) bottom_plane = rs.coerceplane(self.plates[i].bottom_plane) finger_box_p = Toolbox.Breps.slice_2_planes(finger_box_p, top_plane, bottom_plane) self.plates[nb].joints_positives.append(finger_box_p) # contour top_poly_n = rs.AddPolyline([proj_top_n[0],proj_top_n[1], proj_top_n[2], proj_top_n[3]]) bottom_poly_n = rs.AddPolyline([proj_bottom_n[0],proj_bottom_n[1], proj_bottom_n[2], proj_bottom_n[3]]) top_poly_p = rs.AddPolyline([proj_top_p[0],proj_top_p[1], proj_top_p[2], proj_top_p[3]]) bottom_poly_p = rs.AddPolyline([proj_bottom_p[0],proj_bottom_p[1], proj_bottom_p[2], proj_bottom_p[3]]) self.plates[nb].top_contour = Toolbox.Curves.insert_curves(self.plates[nb].top_contour, [top_poly_p]) self.plates[nb].bottom_contour = Toolbox.Curves.insert_curves(self.plates[nb].bottom_contour, [bottom_poly_p]) self.plates[i].top_contour = Toolbox.Curves.insert_curves(self.plates[i].top_contour, [top_poly_n]) self.plates[i].bottom_contour = Toolbox.Curves.insert_curves(self.plates[i].bottom_contour, [bottom_poly_n]) #solid - second side for k in range(len(location_1)): #base polyline point1 = rs.coerce3dpoint(rs.CopyObject(location_1[k], joint_plane.XAxis * finger_width_1/2)) point4 = rs.coerce3dpoint(rs.CopyObject(location_1[k], -joint_plane.XAxis * finger_width_1/2)) point2 = rs.coerce3dpoint(rs.CopyObject(point1, -joint_plane.YAxis * new_finger_length_1)) point3 = rs.coerce3dpoint(rs.CopyObject(point4, -joint_plane.YAxis * new_finger_length_1)) polyline = [point1, point2, point3, point4, point1] #projection for joint negative proj_top_n = 
rg.Polyline(copy.deepcopy(polyline)) proj_top_n.Transform(rg.Transform.ProjectAlong(self.plates[nb].top_plane, joint_plane.ZAxis)) proj_top_n =proj_top_n.ToArray() proj_bottom_n = rg.Polyline(copy.deepcopy(polyline)) proj_bottom_n.Transform(rg.Transform.ProjectAlong(self.plates[nb].bottom_plane, joint_plane.ZAxis)) proj_bottom_n = proj_bottom_n.ToArray() finger_box_n = box = rg.Brep.CreateFromBox(proj_top_n[0:4] + proj_bottom_n[0:4]) self.plates[nb].joints_negatives.append(finger_box_n) #projection for joint positive proj_top_p = rg.Polyline(copy.deepcopy(polyline)) proj_top_p.Transform(rg.Transform.ProjectAlong(self.plates[i].top_plane, joint_plane.ZAxis)) proj_top_p =proj_top_p.ToArray() proj_bottom_p = rg.Polyline(copy.deepcopy(polyline)) proj_bottom_p.Transform(rg.Transform.ProjectAlong(self.plates[i].bottom_plane, joint_plane.ZAxis)) proj_bottom_p = proj_bottom_p.ToArray() finger_box_p = box = rg.Brep.CreateFromBox(proj_top_p[0:4] + proj_bottom_p[0:4]) #if (finger_length_1 == 'default') or (finger_length_1 == 0) : top_plane = rs.coerceplane(self.plates[nb].top_plane) bottom_plane = rs.coerceplane(self.plates[nb].bottom_plane) finger_box_p = Toolbox.Breps.slice_2_planes(finger_box_p, top_plane, bottom_plane) self.plates[i].joints_positives.append(finger_box_p) # contour top_poly_n = rs.AddPolyline([proj_top_n[0],proj_top_n[1], proj_top_n[2], proj_top_n[3]]) bottom_poly_n = rs.AddPolyline([proj_bottom_n[0],proj_bottom_n[1], proj_bottom_n[2], proj_bottom_n[3]]) top_poly_p = rs.AddPolyline([proj_top_p[0],proj_top_p[1], proj_top_p[2], proj_top_p[3]]) bottom_poly_p = rs.AddPolyline([proj_bottom_p[0],proj_bottom_p[1], proj_bottom_p[2], proj_bottom_p[3]]) self.plates[i].top_contour = Toolbox.Curves.insert_curves(self.plates[i].top_contour, [top_poly_p]) self.plates[i].bottom_contour = Toolbox.Curves.insert_curves(self.plates[i].bottom_contour, [bottom_poly_p]) self.plates[nb].top_contour = Toolbox.Curves.insert_curves(self.plates[nb].top_contour, [top_poly_n]) 
self.plates[nb].bottom_contour = Toolbox.Curves.insert_curves(self.plates[nb].bottom_contour, [bottom_poly_n]) # Structural analysis for k in range(len(location_1)): pm=rs.CurveClosestPoint(self.FEM_plates[i],location_1[k]) pf=rs.CurveClosestPoint(self.FEM_plates[nb],location_1[k]) """ self.FEM_plates[i] = scriptcontext.doc.Objects.Add(self.FEM_plates[i]) self.FEM_plates[nb] = scriptcontext.doc.Objects.Add(self.FEM_plates[nb]) joint_line = rs.AddLine(rs.EvaluateCurve(self.FEM_plates[i],pm), rs.EvaluateCurve(self.FEM_plates[nb],pf)) rs.InsertCurveKnot(self.FEM_plates[i],pm) rs.InsertCurveKnot(self.FEM_plates[nb],pf) self.FEM_plates[i] = rs.coercecurve(self.FEM_plates[i]) self.FEM_plates[nb] = rs.coercecurve(self.FEM_plates[nb]) self.FEM_joints.append(rs.coercecurve(joint_line)) for k in range(len(location_2)): pm=rs.CurveClosestPoint(self.FEM_plates[i],location_2[k]) pf=rs.CurveClosestPoint(self.FEM_plates[nb],location_2[k]) self.FEM_plates[i] = scriptcontext.doc.Objects.Add(self.FEM_plates[i]) self.FEM_plates[nb] = scriptcontext.doc.Objects.Add(self.FEM_plates[nb]) joint_line = rs.AddLine(rs.EvaluateCurve(self.FEM_plates[i],pm), rs.EvaluateCurve(self.FEM_plates[nb],pf)) rs.InsertCurveKnot(self.FEM_plates[i],pm) rs.InsertCurveKnot(self.FEM_plates[nb],pf) self.FEM_plates[i] = rs.coercecurve(self.FEM_plates[i]) self.FEM_plates[nb] = rs.coercecurve(self.FEM_plates[nb]) self.FEM_joints.append(rs.coercecurve(joint_line)) """ @__skip_nones def add_halflap(self, plates_pairs='all', proportion = 0.5, tolerance = 0.0, min_angle = 45.0, straight_height = 0.0, fillet_height = 0.0, segments = 1): """Add half-lap joints on Intersecting Plates.""" #cast plate_pairs to string if plates_pairs != 'all': for i in range(len(plates_pairs)): plates_pairs[i] = str(plates_pairs[i])
data points for each node.\ They are archetypal for what the node represents and what subgroup of\ the data it encapsulates.') header = context.feature_names representatives = np.array( [np.round(d['representative'][0], 2) for d in desc['nodes']]) cells = representatives.T plot = p.plot_table(header, cells) # plot['layout']['title'] = 'The representatives (most likely instances) of each node' iplot(plot) spn.root = root def get_node_description(spn, parent_node, size): # parent_node.validate() parent_type = type(parent_node).__name__ node_descriptions = dict() node_descriptions['num'] = len(parent_node.children) nodes = list() for i, node in enumerate(parent_node.children): node_spn = Copy(node) assign_ids(node_spn) node_dir = dict() node_dir['weight'] = parent_node.weights[i] if parent_type == 'Sum' else 1 node_dir['size'] = get_number_of_nodes(node) - 1 node_dir['num_children'] = len(node.children) if not isinstance(node, Leaf) else 0 node_dir['leaf'] = isinstance(node, Leaf) node_dir['type'] = type(node).__name__ + ' Node' node_dir['split_features'] = [list(c.scope) for c in node.children] if not isinstance(node, Leaf) else node.scope node_dir['split_features'].sort(key=lambda x: len(x)) node_dir['depth'] = get_depth(node) node_dir['child_depths'] = [get_depth(c) for c in node.children] descriptor = node_dir['type'] if all((d == 0 for d in node_dir['child_depths'])): descriptor = 'shallow ' + descriptor node_dir['quick'] = 'shallow' elif len([d for d in node_dir['child_depths'] if d == 0]) == 1: node_dir['quick'] = 'split_one' descriptor += ', which separates one feature' else: node_dir['quick'] = 'deep' descriptor = 'deep ' + descriptor descriptor = 'a ' + descriptor node_dir['descriptor'] = descriptor node_dir['short_descriptor'] = descriptor node_dir['representative'] = mpe(node_spn, np.array([[np.nan] * size])) nodes.append(node_dir) node_descriptions['shallow'] = len([d for d in nodes if d['quick'] == 'shallow']) node_descriptions['split_one'] = len([d for 
d in nodes if d['quick'] == 'split_one']) node_descriptions['deep'] = len([d for d in nodes if d['quick'] == 'deep']) nodes.sort(key=lambda x: x['weight']) nodes.reverse() node_descriptions['nodes'] = nodes return node_descriptions def show_node_separation(spn, nodes, context): categoricals = get_categoricals(spn, context) all_features = spn.scope feature_names = context.feature_names if features_shown == 'all': shown_features = all_features elif isinstance(features_shown, int): num_choices = min(features_shown, len(all_features)) shown_features = random.sample(all_features, k=num_choices) else: shown_features = features_shown node_means = np.array([get_mean(node).reshape(-1) for node in nodes]) node_vars = np.array([get_variance(node).reshape(-1) for node in nodes]) node_stds = np.sqrt(node_vars) names = np.arange(1,len(nodes)+1,1) strength_separation = cluster_anova(spn) node_var, node_mean = cluster_mean_var_distance(nodes, spn) all_seps = {i: separation for i, separation in zip(shown_features, strength_separation)} for i in shown_features: if i not in categoricals: description_string = '' plot = p.plot_error_bar(names, node_means[:,i], node_vars[:,i], feature_names[i]) strength = ['weak', 'moderate', 'strong', 'very strong', 'perfect'] strength_values = [0.3, 0.6, 0.8, 0.99] strength_adv = strength[threshold(strength_values, strength_separation[i])]+'ly' var_outliers = np.where(node_var[:,i] > variance_threshold)[0] if len(var_outliers) == 1: node_string = ', '.join([str(v) for v in var_outliers]) description_string += 'The variance of node {} is significantly larger then the average node. '.format(node_string) elif len(var_outliers) > 0: node_string = ', '.join([str(v) for v in var_outliers]) description_string += 'The variances of the nodes {} are significantly larger then the average node. 
'.format(node_string) mean_high_outliers = np.where(node_mean[:,i] > mean_threshold)[0] mean_low_outliers = np.where(node_mean[:,i] < -mean_threshold)[0] if len(mean_high_outliers) == 1: node_string = ', '.join([str(v) for v in mean_high_outliers]) description_string += 'The mean of node {} is significantly larger then the average node. '.format(node_string) elif len(mean_high_outliers) > 0: node_string = ', '.join([str(v) for v in mean_high_outliers]) description_string += 'The means of the nodes {} are significantly larger then the average node. '.format(node_string) if len(mean_low_outliers) == 1: node_string = ', '.join([str(v) for v in mean_low_outliers]) description_string += 'The mean of node {} is significantly smaller then the average node.'.format(node_string) elif len(mean_low_outliers) > 0: node_string = ', '.join([str(v) for v in mean_low_outliers]) description_string += 'The means of the nodes {} are significantly smaller then the average node.'.format(node_string) if description_string or strength_separation[i] > separation_threshold: description_string = 'The feature "{}" is {} separated by the clustering. 
'.format(feature_names[i], strength_adv) + description_string iplot(plot) printmd(description_string) return all_seps def node_categorical_description(spn, dictionary): context = dictionary['context'] categoricals = get_categoricals(spn, context) feature_names = context.feature_names enc = [dictionary['features'][cat]['encoder'] for cat in categoricals] summarized, contributions = categorical_nodes_description(spn, context) for i, cat in enumerate(categoricals): printmd('#### Distribution of {}'.format(feature_names[cat])) for cat_instance in [int(c) for c in context.get_domains_by_scope([cat])[0]]: name = enc[i].inverse_transform([cat_instance]) contrib_nodes = summarized[cat]['contrib'][cat_instance][0] prop_of_instance = summarized[cat]['explained'][cat_instance][cat_instance] prop_of_nodes = prop_of_instance / np.sum( summarized[cat]['explained'][cat_instance]) if prop_of_instance < 0.7: printmd('The feature "{}" is not separated well along the primary\ clusters.'.format(feature_names[cat])) break else: desc = '{}% of "{}" is captured by the nodes {}. 
The probability of\ "{}" for this group of nodes is {}%' printmd(desc.format(np.round(prop_of_instance * 100, 2), name, ', '.join([str(n) for n in contrib_nodes]), name, np.round(prop_of_nodes * 100, 2), )) def classification(spn, numerical_data, dictionary): context = dictionary['context'] categoricals = get_categoricals(spn, context) misclassified = {} data_dict = {} for i in categoricals: y_true = numerical_data[:, i].reshape(-1, 1) query = np.copy(numerical_data) y_pred = predict_mpe(spn, i, query, context).reshape(-1, 1) misclassified[i] = np.where(y_true != y_pred)[0] misclassified_instances = misclassified[i].shape[0] data_dict[i] = np.concatenate((query[:, :i], y_pred, query[:, i+1:]), axis=1) printmd('For feature "{}" the SPN misclassifies {} instances, resulting in a precision of {}%.'.format( context.feature_names[i], misclassified_instances, np.round(100 * (1 - misclassified_instances/len(numerical_data)),2))) return misclassified, data_dict def describe_misclassified(spn, dictionary, misclassified, data_dict, numerical_data): context = dictionary['context'] categoricals = get_categoricals(spn, context) empty = np.array([[np.nan] * len(spn.scope)]) for i in categoricals: if use_shapley: raise NotImplementedError else: if misclassified_explanations == 'all': show_misclassified = misclassified[i] elif isinstance(misclassified_explanations, int): num_choices = min(misclassified_explanations, len(misclassified[i])) show_misclassified = random.sample(misclassified[i].tolist(), k=num_choices) else: show_misclassified = misclassified_explanations for inst_num in show_misclassified: instance = data_dict[i][inst_num:inst_num + 1] evidence = instance.copy() evidence[:, i] = np.nan prior = log_likelihood(spn, evidence) posterior = log_likelihood(spn, instance) total = 0 all_nodes = [] for j, node in enumerate(spn.children): node_prob = np.exp(np.log(spn.weights[j]) + log_likelihood(spn, instance) - posterior) total += node_prob all_nodes.append((node_prob, j)) 
all_nodes.sort() all_nodes.reverse() needed_nodes = [] all_reps = [] total_prob = 0 for prob, idx in all_nodes: node = Copy(spn.children[idx]) assign_ids(node) total_prob += prob needed_nodes.append(idx) all_reps.append(mpe(node, empty)[0]) if total_prob > 0.9: break real_value = dictionary['features'][i][ 'encoder'].inverse_transform( [int(numerical_data[inst_num, i])]) pred_value = dictionary['features'][i][ 'encoder'].inverse_transform( [int(data_dict[i][inst_num, i])]) printmd( 'Instance {} was predicted as "{}", even though it is "{}", because it was most similar to the following clusters: {}'.format( inst_num, pred_value, real_value, ', '.join(map(str, needed_nodes)))) all_reps = np.array(all_reps).reshape(len(needed_nodes), len(spn.scope)) table = np.round(np.concatenate([instance, all_reps], axis=0), 2) node_nums = np.array(['instance'] + needed_nodes).reshape(-1, 1) table = np.append(node_nums, table, axis=1) iplot(p.plot_table([''] + context.feature_names, table.transpose())) def explanation_vector_description(spn, dictionary, data_dict, cat_features, use_shap=False): context = dictionary['context'] categoricals = get_categoricals(spn, context) num_features = len(spn.scope) feature_types = context.parametric_types domains = context.get_domains_by_scope(spn.scope) feature_names = context.feature_names all_combinations = list(itertools.product(categoricals, list(range(num_features)))) if explanation_vectors_show == 'all': shown_combinations = all_combinations elif isinstance(explanation_vectors_show, int): num_choices = min(explanation_vectors_show, len(all_combinations)) shown_combinations = random.sample(all_combinations, k=num_choices) else: shown_combinations = features_shown if explanation_vector_classes: shown_classes = explanation_vector_classes else: shown_classes = categoricals def plot_query(query, data, query_dict): if len(query[0]) == 0: return None conditional_evidence = np.full((1, num_features), np.nan) conditional_evidence[:, i] = data[0,i] 
gradients = fast_conditional_gradient(spn, conditional_evidence, data[query]) gradients_norm = np.linalg.norm(gradients, axis = 1).reshape(-1,1) _gradients = (gradients/gradients_norm)[:,k] discretize = np.histogram(_gradients, range=(-1,1), bins = 20) binsize = discretize[1][1] - discretize[1][0] if np.abs(_gradients.mean()) < explanation_vector_threshold: return _gradients header, description, plot = explanation_vector(_gradients, discretize, data, query, query_dict) if not header: return _gradients printmd(header) iplot(plot) printmd(description) return _gradients all_gradients = {} for i in shown_classes: all_gradients[i] = {} for j in domains[i]: all_gradients[i][j] = {} printmd('#### Class "{}": "{}"'.format( feature_names[i], dictionary['features'][i]['encoder'].inverse_transform([int(j)]))) test_query = np.where((data_dict[i][:,i] == j)) if len(test_query[0]) == 0: printmd('For this particular class instance, no instances of the predicted data were found. \ This might be because the predictive precision of the network was not high enough.') continue #if use_shap: # shapley_values = shap_sampling(spn, ) for k in range(num_features - 1): all_gradients[i][j][k] = {} this_range = [x for x in range(num_features) if x != i] instance = this_range[k] if (i,k) not in shown_combinations: continue if instance in categoricals: plot_data = [] for l in domains[instance]: query = np.where((data_dict[i][:,i] == j) & (data_dict[i][:,instance] == l)) query_dict = {'type': 'categorical', 'class': feature_names[i], 'class_instance': dictionary['features'][i]['encoder'].inverse_transform( [int(j)]), 'feature': feature_names[instance], 'feature_instance': dictionary['features'][instance]['encoder'].inverse_transform( [int(l)]), 'feature_idx': instance, 'class_idx': i} data = data_dict[i][query] if data.size == 0: continue evidence = np.full((1, data.shape[1]), np.nan) evidence[:, i] = data[0, i] if use_shap: gradients = shap_sampling(spn, data, i, N=10) else: gradients = 
fast_conditional_gradient(spn, evidence, data) gradients_norm = np.linalg.norm(gradients, axis = 1).reshape(-1,1) _gradients = (gradients/gradients_norm)[:,k] discretize = np.histogram(_gradients, range=(-1,1), bins = 10) binsize = discretize[1][1] - discretize[1][0] plot_data.append((_gradients, discretize, query_dict['feature_instance'])) plot = p.plot_cat_explanation_vector(plot_data) header = '##### Predictive categorical feature "{}"\n\n'.format( query_dict['feature']) printmd(header) iplot(plot)
#!flask/bin/python
# Source file: ganimides_server/ganimides_openBankingAPI/_ganimides_openBankingAPI_init_test.py
# (BUGFIX: the bare "<filename>..." metadata line that preceded the shebang was
# not valid Python and made the module unimportable; it is kept here as a comment.)
"""Manual test driver for the open-banking API.

Recreates/clears the bank tables and exercises the bank-subscription
registration and authorization endpoints against simulated data.
"""
import os
import sys

# Make both this package directory and its parent importable when the file is
# executed directly as a script rather than imported as part of the package.
if not (os.path.dirname(os.path.dirname(__file__)) in sys.path):
    sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if not (os.path.dirname(__file__) in sys.path):
    sys.path.append(os.path.dirname(__file__))
#print(sys.path)

from colorama import Fore
import datetime
# import secrets
# import requests
# from flask import Flask, jsonify, abort, request, make_response, url_for,redirect
# from flask_httpauth import HTTPBasicAuth
# from flask import json
# from flask import session, g
from _serverApp import thisApp
from _serverApp import build_process_signature, build_process_call_area, get_debug_level, get_debug_files
from _serverApp import log_process_start, log_process_finish, log_process_message
# NOTE(review): this re-import shadows the colorama Fore imported above --
# confirm _serverApp.Fore is the one actually intended for the prints below.
from _serverApp import Fore
import ganimides_database as db
import _ganimides_openBankingAPI as api

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def recreate_tables():
    """Drop-and-recreate the three bank tables, then empty them.

    Uses the module-level ``_process_call_area`` built in the ``__main__``
    section of this script.
    """
    # 'drop' synchronization rebuilds each table from its current schema,
    # carrying existing records across the rebuild.
    db.check_table(db.BANK_AUTHORIZATIONS,auto_synchronize=True,synchronization_method='drop',copy_records=True,silent=True)
    db.check_table(db.BANK_SUBSCRIPTIONS,auto_synchronize=True,synchronization_method='drop',copy_records=True,silent=True)
    db.check_table(db.BANK_ACCOUNTS,auto_synchronize=True,synchronization_method='drop',copy_records=True,silent=True)
    db.recreate_tables(db.db_schema_Base,db.engine)
    # Start every test run from empty tables.
    db.BANK_AUTHORIZATIONS.delete_rows()
    db.BANK_SUBSCRIPTIONS.delete_rows()
    db.BANK_ACCOUNTS.delete_rows()
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def clear_tables():
    """Empty the three bank tables without touching their schema."""
    db.BANK_AUTHORIZATIONS.clear_table()
    db.BANK_SUBSCRIPTIONS.clear_table()
    db.BANK_ACCOUNTS.clear_table()

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def _elapsed_seconds(start_time):
    """Whole seconds elapsed since *start_time* (microseconds truncated)."""
    diff = datetime.datetime.now() - start_time
    return diff.days * 24 * 60 * 60 + diff.seconds


def create_merchant_subscriptions():  # ...and request client authorization
    """Register bank subscriptions: first for one hard-coded merchant against
    one bank, then for every merchant against every active bank.

    Relies on the module-level ``_process_call_area`` and ``application_name``
    set up in the ``__main__`` section below.
    """
    print(f'{Fore.LIGHTWHITE_EX}create merchant subscriptions:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    # Single known merchant / single bank first.
    merchant_id = '6d1d1a14-e91b-11e9-aae5-33b843d61993'
    bank_id = 'bankofcyprus'
    dbreply = db.dbapi_merchant(dbsession, 'get', {'merchant_id': merchant_id}, caller_area=_process_call_area)
    client_id = dbreply.get('api_data', {}).get('client_id')
    if not client_id:
        print(f'merchant {merchant_id} not found')
    else:
        res = api.banksubscription_register(dbsession,
                                            client_id=client_id,
                                            bank_id=bank_id,
                                            application_name=application_name,
                                            allow_transactionHistory=True,
                                            allow_balance=True,
                                            allow_details=True,
                                            allow_checkFundsAvailability=False,
                                            payments_limit=0,
                                            payments_currency='EUR',
                                            payments_amount=0
                                            )
        print(res)
    # Now every merchant against every active bank.
    banks = dbsession.get_rows(db.BANK, {'status': 'Active'}, caller_area=_process_call_area)
    merchants = dbsession.get_rows(db.MERCHANT, {}, caller_area=_process_call_area)
    for merchant in merchants:
        dbreply = db.dbapi_merchant(dbsession, 'get', {'merchant_id': merchant.merchant_id}, caller_area=_process_call_area)
        client_id = dbreply.get('api_data', {}).get('client_id')
        if not client_id:
            # BUGFIX: report the merchant actually being processed; the
            # original interpolated the hard-coded merchant_id from above.
            print(f'merchant {merchant.merchant_id} not found')
        else:
            for bank in banks:
                res = api.banksubscription_register(dbsession,
                                                    client_id=client_id,
                                                    bank_id=bank.bank_id,
                                                    application_name=application_name,
                                                    allow_transactionHistory=True,
                                                    allow_balance=True,
                                                    allow_details=True,
                                                    allow_checkFundsAvailability=False,
                                                    payments_limit=0,
                                                    payments_currency='EUR',
                                                    payments_amount=0
                                                    )
                print(res)
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def create_client_subscriptions():  # ...and request client auth
    """Register a bankofcyprus subscription for each known test client."""
    print(f'{Fore.LIGHTWHITE_EX}create client subscriptions:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    emails = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>']
    for email in emails:
        client = dbsession.get(db.CLIENT, {'email': email}, caller_area=_process_call_area)
        if client:  # silently skip unknown test addresses
            res = api.banksubscription_register(dbsession,
                                                client_id=client.client_id,
                                                bank_id='bankofcyprus',
                                                application_name=application_name,
                                                allow_transactionHistory=True,
                                                allow_balance=True,
                                                allow_details=True,
                                                allow_checkFundsAvailability=False,
                                                payments_limit=0,
                                                payments_currency='EUR',
                                                payments_amount=0
                                                )
            print(res)
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def receive_authorizations_from_clients():
    """Simulate the bank redirect that delivers a client's authorization code."""
    print(f'{Fore.LIGHTWHITE_EX}receive authorizations from clients:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    bank_id = 'bankofcyprus'
    authorization_code = '1212122121simulated_authorization_code2121212112121'
    # NOTE: mutates the shared module-level call area, so simulation mode
    # stays enabled for every later call in this run.
    _process_call_area.update({'simulation_enabled': True})
    reply = api.banksubscription_receive_authorization_from_client(dbsession, bank_id, authorization_code, caller_area=_process_call_area)
    print(reply.get('api_message'))
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def create_banksubscription_only():
    """Create bank subscriptions without requesting client authorization."""
    print(f'{Fore.LIGHTWHITE_EX}create bank subscription only:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    application_name = 'scanandpay_client'  # deliberately shadows the module-level name
    emails = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>']
    for email in emails:
        client = dbsession.get(db.CLIENT, {'email': email}, caller_area=_process_call_area)
        # Assumes every test email exists; raises AttributeError otherwise.
        client_id = client.client_id
        # client_id='685e0b46-e91b-11e9-bea1-2db812eac691'
        bank_id = 'bankofcyprus'
        reply = api.banksubscription_create(dbsession, client_id, bank_id, application_name,
                                            allow_transactionHistory=True,
                                            allow_balance=True,
                                            allow_details=True,
                                            allow_checkFundsAvailability=True,
                                            payments_limit=1000,
                                            payments_currency='EUR',
                                            payments_amount=100,
                                            caller_area=_process_call_area)
        # NOTE(review): the id is read from the top-level reply here, but from
        # reply['api_data'] in request_authorization_from_client_only --
        # confirm which location the API actually uses.
        subscription_id = reply.get('bank_subscriptionID')
        print(reply.get('api_message'), 'created subscription_id:', subscription_id)
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def request_authorization_from_client_only():
    """Create a subscription per test client, then request the client's
    authorization for it."""
    print(f'{Fore.LIGHTWHITE_EX}request athorization from client only:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    bank_id = 'bankofcyprus'
    application_name = 'scanandpay_client'
    emails = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>']
    for email in emails:
        client = dbsession.get(db.CLIENT, {'email': email}, caller_area=_process_call_area)
        client_id = client.client_id
        reply = api.banksubscription_create(dbsession, client_id, bank_id, application_name,
                                            allow_transactionHistory=True,
                                            allow_balance=True,
                                            allow_details=True,
                                            allow_checkFundsAvailability=True,
                                            payments_limit=1000,
                                            payments_currency='EUR',
                                            payments_amount=100,
                                            caller_area=_process_call_area)
        if not reply.get('api_status') == 'success':
            print(Fore.RED, 'create subs FAILED.')
        else:
            api_data = reply.get('api_data', {})
            subscription_id = api_data.get('bank_subscriptionID')
            # NOTE(review): these rebind the loop's bank_id/application_name
            # from the reply, affecting subsequent iterations -- confirm
            # intentional.
            client_id = api_data.get('client_id')
            bank_id = api_data.get('bank_id')
            application_name = api_data.get('application_name')
            reply = api.banksubscription_request_authorization_from_client(dbsession, client_id, bank_id, subscription_id, application_name, caller_area=_process_call_area)
            print(reply.get('api_message'))
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

def unregister_banksubscription():
    """Unregister two hard-coded subscription ids."""
    print(f'{Fore.LIGHTWHITE_EX}unregister bank subscriptions:')
    start_time = datetime.datetime.now()
    dbsession = db.get_dbsession(**_process_call_area)
    # NOTE(review): empty-dict placeholders for client/bank/application and an
    # empty caller_area -- confirm the API accepts lookup by subscription_id
    # alone.
    for sid in ('6ab80c36-e9de-11e9-8aae-fbff3f03e211',
                '28ddd8c6-e9db-11e9-bae8-e105a87466ca'):
        res = api.banksubscription_unregister(dbsession, client_id={}, bank_id={}, application_name={}, subscription_id=sid, caller_area={})
        print(res.get('api_message'))
    dbsession.close(**_process_call_area)
    print(f'{Fore.MAGENTA}duration :', _elapsed_seconds(start_time))

#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
# main
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if __name__ == '__main__':
    module_id = 'dummy'
    # Earlier debug configurations kept for quick manual switching; only the
    # last assignment takes effect.
    caller_area = {'debug_level': -1, 'debug_template': 'SESSION_ONLY'}
    caller_area = {'debug_level': -1, 'debug_template': 'FULL'}
    caller_area = {'debug_level': 99, 'caller_area_input_debug': False,}
    # Simulated device / application / client identity for the test calls.
    device_uid = 'qeqeqwe213123132213afasfasdffds'
    geolocation_lat = '1212.114213'
    geolocation_lon = '14567.234324234'
    application_name = 'scanandpay_client'
    application_client_id = '67a9fd9a-e91b-11e9-a7f2-75d9d55de53b'
    application_client_secretKey = '<KEY>'
    client_secretKey = '121212112121212'
    client_id = '121212112121212'
    user_id = '6701d6a8-e91b-11e9-98c9-79d91b2c4899'
    caller_area = {
        'application_name': application_name,
        'application_client_id': application_client_id,
        'application_client_secretKey': application_client_secretKey,
        'client_id': client_id,
        'client_secretKey': client_secretKey,
        'user_id': user_id,
        'device_uid': device_uid,
        'geolocation_lat': geolocation_lat,
        'geolocation_lon': geolocation_lon,
    }
    caller_area.update({'debug_level': 0, 'caller_area_input_debug': False,})
    # Build the process signature / call area consumed by every helper above.
    _process_name = "test_openbanking_apis"
    _process_entity = ''
    _process_action = 'test'
    _process_msgID = 'process: test_openbanking_apis'
    _process_identity_kwargs = {'type': 'process', 'module': module_id, 'name': _process_name, 'action': _process_action, 'entity': _process_entity, 'msgID': _process_msgID,}
    _process_adapters_kwargs = {'dbsession': None}
    _process_log_kwargs = {'indent_method': 'AUTO', 'indent_level': None}
    _process_debug_level = get_debug_level(caller_area.get('debug_level'), **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_files = get_debug_files(_process_debug_level, **_process_identity_kwargs, **_process_adapters_kwargs)
    _process_debug_kwargs = {'debug_level': _process_debug_level, 'debug_files': _process_debug_files}
    _process_signature = build_process_signature(**_process_identity_kwargs, **_process_adapters_kwargs, **_process_debug_kwargs, **_process_log_kwargs)
    _process_call_area = build_process_call_area(_process_signature, caller_area)
    # Force simulation mode for every API call made by this test run.
    _process_call_area.update({'simulation_enabled': True})

    log_process_start(_process_msgID, **_process_call_area)

    base_time = datetime.datetime.now()
    #recreate_tables()
    #clear_tables()
    create_banksubscription_only()
    request_authorization_from_client_only()
    # create_merchant_subscriptions()
    # create_client_subscriptions()
    receive_authorizations_from_clients()
    unregister_banksubscription()
    print(f'{Fore.YELLOW}total duration :', _elapsed_seconds(base_time))

    log_process_finish(_process_msgID, {}, **_process_call_area)
robes And may not wear them. O, here comes my nurse, Enter Nurse, with cords. And she brings news; and every tongue that speaks But Romeo's name speaks heavenly eloquence. Now, nurse, what news? What hast thou there? the cords That Romeo bid thee fetch? Nurse. Ay, ay, the cords. [Throws them down.] Jul. Ay me! what news? Why dost thou wring thy hands? Nurse. Ah, weraday! he's dead, he's dead, he's dead! We are undone, lady, we are undone! Alack the day! he's gone, he's kill'd, he's dead! Jul. Can heaven be so envious? Nurse. Romeo can, Though heaven cannot. O Romeo, Romeo! Who ever would have thought it? Romeo! Jul. What devil art thou that dost torment me thus? This torture should be roar'd in dismal hell. Hath Romeo slain himself? Say thou but 'I,' And that bare vowel 'I' shall poison more Than the death-darting eye of cockatrice. I am not I, if there be such an 'I'; Or those eyes shut that make thee answer 'I.' If he be slain, say 'I'; or if not, 'no.' Brief sounds determine of my weal or woe. Nurse. I saw the wound, I saw it with mine eyes, (God save the mark!) here on his manly breast. A piteous corse, a bloody piteous corse; Pale, pale as ashes, all bedaub'd in blood, All in gore-blood. I swounded at the sight. Jul. O, break, my heart! poor bankrout, break at once! To prison, eyes; ne'er look on liberty! Vile earth, to earth resign; end motion here, And thou and Romeo press one heavy bier! Nurse. O Tybalt, Tybalt, the best friend I had! O courteous Tybalt! honest gentleman That ever I should live to see thee dead! Jul. What storm is this that blows so contrary? Is Romeo slaught'red, and is Tybalt dead? My dear-lov'd cousin, and my dearer lord? Then, dreadful trumpet, sound the general doom! For who is living, if those two are gone? Nurse. Tybalt is gone, and Romeo banished; Romeo that kill'd him, he is banished. Jul. O God! Did Romeo's hand shed Tybalt's blood? Nurse. It did, it did! alas the day, it did! Jul. O serpent heart, hid with a flow'ring face! 
Did ever dragon keep so fair a cave? Beautiful tyrant! fiend angelical! Dove-feather'd raven! wolvish-ravening lamb! Despised substance of divinest show! Just opposite to what thou justly seem'st- A damned saint, an honourable villain! O nature, what hadst thou to do in hell When thou didst bower the spirit of a fiend In mortal paradise of such sweet flesh? Was ever book containing such vile matter So fairly bound? O, that deceit should dwell In such a gorgeous palace! Nurse. There's no trust, No faith, no honesty in men; all perjur'd, All forsworn, all naught, all dissemblers. Ah, where's my man? Give me some aqua vitae. These griefs, these woes, these sorrows make me old. Shame come to Romeo! Jul. Blister'd be thy tongue For such a wish! He was not born to shame. Upon his brow shame is asham'd to sit; For 'tis a throne where honour may be crown'd Sole monarch of the universal earth. O, what a beast was I to chide at him! Nurse. Will you speak well of him that kill'd your cousin? Jul. Shall I speak ill of him that is my husband? Ah, poor my lord, what tongue shall smooth thy name When I, thy three-hours wife, have mangled it? But wherefore, villain, didst thou kill my cousin? That villain cousin would have kill'd my husband. Back, foolish tears, back to your native spring! Your tributary drops belong to woe, Which you, mistaking, offer up to joy. My husband lives, that Tybalt would have slain; And Tybalt's dead, that would have slain my husband. All this is comfort; wherefore weep I then? Some word there was, worser than Tybalt's death, That murd'red me. I would forget it fain; But O, it presses to my memory Like damned guilty deeds to sinners' minds! 'Tybalt is dead, and Romeo- banished.' That 'banished,' that one word 'banished,' Hath slain ten thousand Tybalts. 
Tybalt's death Was woe enough, if it had ended there; Or, if sour woe delights in fellowship And needly will be rank'd with other griefs, Why followed not, when she said 'Tybalt's dead,' Thy father, or thy mother, nay, or both, Which modern lamentation might have mov'd? But with a rearward following Tybalt's death, 'Romeo is banished'- to speak that word Is father, mother, Tybalt, Romeo, Juliet, All slain, all dead. 'Romeo is banished'- There is no end, no limit, measure, bound, In that word's death; no words can that woe sound. Where is my father and my mother, nurse? Nurse. Weeping and wailing over Tybalt's corse. Will you go to them? I will bring you thither. Jul. Wash they his wounds with tears? Mine shall be spent, When theirs are dry, for Romeo's banishment. Take up those cords. Poor ropes, you are beguil'd, Both you and I, for Romeo is exil'd. He made you for a highway to my bed; But I, a maid, die maiden-widowed. Come, cords; come, nurse. I'll to my wedding bed; And death, not Romeo, take my maidenhead! Nurse. Hie to your chamber. I'll find Romeo To comfort you. I wot well where he is. Hark ye, your Romeo will be here at night. I'll to him; he is hid at Laurence' cell. Jul. O, find him! give this ring to my true knight And bid him come to take his last farewell. Exeunt. Scene III. Friar Laurence's cell. Enter Friar [Laurence]. Friar. Romeo, come forth; come forth, thou fearful man. Affliction is enamour'd of thy parts, And thou art wedded to calamity. Enter Romeo. Rom. Father, what news? What is the Prince's doom? What sorrow craves acquaintance at my hand That I yet know not? Friar. Too familiar Is my dear son with such sour company. I bring thee tidings of the Prince's doom. Rom. What less than doomsday is the Prince's doom? Friar. A gentler judgment vanish'd from his lips- Not body's death, but body's banishment. Rom. Ha, banishment? Be merciful, say 'death'; For exile hath more terror in his look, Much more than death. Do not say 'banishment.' Friar. 
Hence from Verona art thou banished. Be patient, for the world is broad and wide. Rom. There is no world without Verona walls, But purgatory, torture, hell itself. Hence banished is banish'd from the world, And world's exile is death. Then 'banishment' Is death misterm'd. Calling death 'banishment,' Thou cut'st my head off with a golden axe And smilest upon the stroke that murders me. Friar. O deadly sin! O rude unthankfulness! Thy fault our law calls death; but the kind Prince, Taking thy part, hath rush'd aside the law, And turn'd that black word death to banishment. This is dear mercy, and thou seest it not. Rom. 'Tis torture, and not mercy. Heaven is here, Where Juliet lives; and every cat and dog And little mouse, every unworthy thing, Live here in heaven and may look on her; But Romeo may not. More validity, More honourable state, more courtship lives In carrion flies than Romeo. They may seize On the white wonder of dear Juliet's hand And steal immortal blessing from her lips, Who, even in pure and vestal modesty, Still blush, as thinking their own kisses sin; But Romeo may not- he is banished. This may flies do, when I from this must fly; They are free men, but I am banished. And sayest thou yet that exile is not death? Hadst thou no poison mix'd, no sharp-ground knife, No sudden mean of death, though ne'er so mean, But 'banished'
value of 0.5. regul_Sigma : float (optional) Regularization parameter for Sigma. Try first a value of 0.001. min_val_C : float (optional) Minimum value to bound connectivity estimate. This should be zero or slightly negative (too negative limit can bring to an inhibition dominated system). If the empirical covariance has many negative entries then a slightly negative limit can improve the estimation accuracy. max_val_C : float (optional) Maximum value to bound connectivity estimate. This is useful to avoid large weight that make the system unstable. If the estimated connectivity saturates toward this value (it usually doesn't happen) it can be increased. max_iter : integer (optional) Number of maximum optimization steps. If final number of iterations reaches this maximum it means the algorithm has not converged. min_iter : integer (optional) Number of minimum optimization steps before testing if end of optimization (increase of model error). Returns ------- J : ndarray of rank 2 The estimated Jacobian. Shape [n_nodes, n_nodes] Sigma : ndarray of rank 2 Estimated noise covariance. Shape [n_nodes, n_nodes] d_fit : dictionary A dictionary with diagnostics of the fit. Keys are: ['iterations', 'distance', 'correlation']. """ # TODO: make better graphics (deal with axes separation, etc.) 
if (not type(i_tau_opt) == int) or (i_tau_opt <= 0): raise ValueError('Scalar value i_tau_opt must be non-zero') # Objective FC matrices (empirical) Q0_obj = Q_obj[0] Qtau_obj = Q_obj[i_tau_opt] # Autocovariance time constant (exponential decay) log_ac = np.log( np.maximum( Q_obj.diagonal(axis1=1,axis2=2), 1e-10 ) ) v_tau = np.arange(Q_obj.shape[0], dtype=np.float) lin_reg = np.polyfit( np.repeat(v_tau, self.n_nodes), log_ac.reshape(-1), 1 ) tau_obj = -1.0 / lin_reg[0] # coefficients to balance the model error between Q0 and Qtau norm_Q0_obj = np.linalg.norm(Q0_obj) norm_Qtau_obj = np.linalg.norm(Qtau_obj) # mask for existing connections for EC and Sigma mask_diag = np.eye(self.n_nodes, dtype=bool) if mask_C is None: # Allow all possible connections to be tuned except self-connections (on diagonal) mask_C = np.logical_not(mask_diag) if mask_Sigma is None: # Independent noise (no cross-covariances for Sigma) mask_Sigma = np.eye(self.n_nodes, dtype=bool) # Initialise network and noise. Give initial parameters C = np.zeros([self.n_nodes, self.n_nodes], dtype=np.float) tau_x = np.copy(tau_obj) Sigma = np.eye(self.n_nodes, dtype=np.float) # Best distance between model and empirical data best_dist = 1e10 best_Pearson = 0.0 # Arrays to record model parameters and outputs # model error = matrix distance between FC matrices dist_Q_hist = np.zeros([max_iter], dtype=np.float) # Pearson correlation between model and objective FC matrices Pearson_Q_hist = np.zeros([max_iter], dtype=np.float) # identity matrix id_mat = np.eye(self.n_nodes, dtype=np.float) # run the optimization process stop_opt = False i_iter = 0 while not stop_opt: # calculate Jacobian of dynamical system J = -id_mat / tau_x + C # Calculate Q0 and Qtau for model Q0 = spl.solve_continuous_lyapunov(J.T, -Sigma) Qtau = np.dot( Q0, spl.expm( J * i_tau_opt ) ) # difference matrices between model and objectives Delta_Q0 = Q0_obj - Q0 Delta_Qtau = Qtau_obj - Qtau # Calculate error between model and empirical data for 
Q0 and FC_tau (matrix distance) dist_Q0 = np.linalg.norm(Delta_Q0) / norm_Q0_obj dist_Qtau = np.linalg.norm(Delta_Qtau) / norm_Qtau_obj dist_Q_hist[i_iter] = 0.5 * (dist_Q0 + dist_Qtau) # Calculate corr between model and empirical data for Q0 and FC_tau Pearson_Q0 = stt.pearsonr( Q0.reshape(-1), Q0_obj.reshape(-1) )[0] Pearson_Qtau = stt.pearsonr( Qtau.reshape(-1), Qtau_obj.reshape(-1) )[0] Pearson_Q_hist[i_iter] = 0.5 * (Pearson_Q0 + Pearson_Qtau) # Best fit given by best Pearson correlation coefficient # for both Q0 and Qtau (better than matrix distance) if dist_Q_hist[i_iter] < best_dist: best_dist = dist_Q_hist[i_iter] best_Pearson = Pearson_Q_hist[i_iter] J_best = np.copy(J) Sigma_best = np.copy(Sigma) else: # wait at least 5 optimization steps before stopping stop_opt = i_iter > min_iter # Jacobian update with weighted FC updates depending on respective error Delta_J = np.dot( np.linalg.pinv(Q0), Delta_Q0 ) + np.dot( Delta_Q0, np.linalg.pinv(Q0) ) \ + np.dot( np.linalg.pinv(Qtau), Delta_Qtau ) + np.dot( Delta_Qtau, np.linalg.pinv(Qtau) ) # Update effective conectivity matrix (regularization is L2) C[mask_C] += epsilon_C * ( Delta_J - regul_C * C )[mask_C] C[mask_C] = np.clip(C[mask_C], min_val_C, max_val_C) # Update noise matrix Sigma (regularization is L2) Delta_Sigma = - np.dot(J.T, Delta_Q0) - np.dot(Delta_Q0, J) Sigma[mask_Sigma] += epsilon_Sigma * ( Delta_Sigma - regul_Sigma * Sigma )[mask_Sigma] Sigma[mask_diag] = np.maximum(Sigma[mask_diag], min_val_Sigma_diag) # Check if max allowed number of iterations have been reached if i_iter >= max_iter-1: stop_opt = True print("Optimization did not converge. 
Maximum number of iterations arrived.") # Check if iteration has finished or still continues if stop_opt: self.d_fit['iterations'] = i_iter+1 self.d_fit['distance'] = best_dist self.d_fit['correlation'] = best_Pearson self.d_fit['distance history'] = dist_Q_hist self.d_fit['correlation history'] = Pearson_Q_hist else: i_iter += 1 # Save the results and return self.J = J_best # matrix self.Sigma = Sigma_best # matrix return self def fit_moments(self, Q0_obj, Q1_obj, mask_C=None): """ Estimation of MOU parameters (connectivity C, noise covariance Sigma, and time constant tau_x) with moments method. Parameters ---------- Q0_obj : ndarray of rank 2 The zero-lag covariance matrix of the time series to fit. Q1_obj : ndarray of rank 2 The 1-lag covariance matrix of the time series to fit. mask_C : boolean ndarray of rank-2 (optional) Mask of known non-zero values for connectivity matrix, for example estimated by DTI. Returns ------- J : ndarray of rank 2 The estimated Jacobian. Shape [n_nodes, n_nodes] Sigma : ndarray of rank 2 Estimated noise covariance. Shape [n_nodes, n_nodes] d_fit : dictionary A dictionary with diagnostics of the fit. Keys are: ['iterations', 'distance', 'correlation']. 
""" # Jacobian estimate inv_Q0 = np.linalg.inv(Q0_obj) J = spl.logm( np.dot(inv_Q0, Q1_obj) ) # Sigma estimate Sigma = - np.dot(J.conjugate(), Q0_obj) - np.dot(Q0_obj, J) # masks for existing positions mask_diag = np.eye(self.n_nodes, dtype=np.bool) if mask_C is None: # Allow all possible connections to be tuned except self-connections (on diagonal) mask_C = np.logical_not(mask_diag) # cast to real matrices if np.any(np.iscomplex(J)): print("Warning: complex values in J; casting to real!") J_best = np.real(J) J_best[np.logical_not(np.logical_or(mask_C,mask_diag))] = 0 if np.any(np.iscomplex(Sigma)): print("Warning: complex values in Sigma; casting to real!") Sigma_best = np.real(Sigma) # model theoretical covariances with real J and Sigma Q0 = spl.solve_continuous_lyapunov(J_best.T, -Sigma_best) Q1 = np.dot( Q0, spl.expm(J_best) ) # Calculate error between model and empirical data for Q0 and FC_tau (matrix distance) dist_Q0 = np.linalg.norm(Q0 - Q0_obj) / np.linalg.norm(Q0_obj) dist_Qtau = np.linalg.norm(Q1 - Q1_obj) / np.linalg.norm(Q1_obj) self.d_fit['distance'] = 0.5 * (dist_Q0 + dist_Qtau) # Average correlation between empirical and theoretical Pearson_Q0 = stt.pearsonr( Q0.reshape(-1), Q0_obj.reshape(-1) )[0] Pearson_Qtau = stt.pearsonr( Q1.reshape(-1), Q1_obj.reshape(-1) )[0] self.d_fit['correlation'] = 0.5 * (Pearson_Q0 + Pearson_Qtau) # Save the results and return self.J = J_best # matrix self.Sigma = Sigma_best # matrix return self def score(self): """ Returns the correlation between goodness of fit of the MOU to the data, measured by the Pearson correlation between the obseved covariances and the model covariances. """ try: return self.d_fit['correlation'] except: print('The model should be fitted first.') return np.nan ## GORKA: Shall this raise a RunTimeWarning or other type of warning? def model_covariance(self, tau=0.0): """ Calculates theoretical (lagged) covariances of the model given the parameters (forward step). 
Notice that this is not the empirical covariance matrix as estimated from simulated time series. Parameters ---------- tau : scalar The time lag to calculate the covariance. It can be a positive or negative. Returns ------- FC : ndarray of rank-2 The (lagged) covariance matrix. """ # Calculate zero lag-covariance Q0 by solving Lyapunov equation Q0 = spl.solve_continuous_lyapunov(self.J.T, -self.Sigma) # Calculate the effect of the lag (still valid for tau = 0.0) if tau >= 0.0: return np.dot(Q0, spl.expm(tau * self.J)) else: return np.dot(spl.expm(-tau * self.J.T), Q0) def simulate(self, T=100, dt=0.05, sampling_step=1., random_state=None): """ Simulate the MOU process with simple Euler integration defined by the time step. Parameters ---------- T : integer (optional) Duration of simulation. dt : scalar (optional) Integration time step. sampling_step : scalar (optional) Period for subsampling the generated time series. random_state : long or int (optional) Description here ... Returns -------- ts : ndarray of rank-2 Time series of simulated network activity of shape [T, n_nodes] Notes ----- It is possible
0 f = open(fn, 'w') print("Starting size of data frame: %i" % len(hostDF), file=f) try: os.makedirs('quiverMaps') except: print("Already have the folder quiverMaps!") for i in np.arange(len(step_sizes)): try: # if True: transient_name = SN_names[i] print("Transient: %s"% transient_name, file=f) ra = transientDF.loc[transientDF['Name'] == transient_name, 'RA'].values[0] dec = transientDF.loc[transientDF['Name'] == transient_name, 'DEC'].values[0] px = 800 g_img, wcs, g_hdu = get_clean_img(ra, dec, px, 'g') g_mask = np.ma.masked_invalid(g_img).mask r_img, wcs, r_hdu = get_clean_img(ra, dec, px, 'r') r_mask = np.ma.masked_invalid(r_img).mask i_img, wcs, i_hdu = get_clean_img(ra, dec, px, 'i') i_mask = np.ma.masked_invalid(i_img).mask #cleanup - remove the fits files when we're done using them for band in ['g', 'r', 'i']: os.remove("PS1_ra={}_dec={}_{}arcsec_{}_stack.num.fits".format(ra, dec, int(px*0.25), band)) os.remove("PS1_ra={}_dec={}_{}arcsec_{}_mask.fits".format(ra, dec, int(px*0.25), band)) os.remove("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(ra, dec, int(px*0.25), band)) #os.chdir(path) # if e.errno != errno.EEXIST: # raise #os.chdir("./quiverMaps") nancount = 0 obj_interp = [] for obj in [g_img, r_img, i_img]: data = obj mean, median, std = sigma_clipped_stats(data, sigma=20.0) daofind = DAOStarFinder(fwhm=3.0, threshold=20.*std) sources = daofind(data - median) try: xvals = np.array(sources['xcentroid']) yvals = np.array(sources['ycentroid']) # for col in sources.colnames: # sources[col].info.format = '%.8g' # for consistent table output for k in np.arange(len(xvals)): tempx = xvals[k] tempy = yvals[k] yleft = np.max([int(tempy) - 7, 0]) yright = np.min([int(tempy) + 7, np.shape(data)[1]-1]) xleft = np.max([int(tempx) - 7, 0]) xright = np.min([int(tempx) + 7, np.shape(data)[1]-1]) for r in np.arange(yleft,yright+1): for j in np.arange(xleft, xright+1): if dist([xvals[k], yvals[k]], [j, r]) < 5: data[r, j] = np.nan nancount += np.sum(np.isnan(data)) 
positions = np.transpose((sources['xcentroid'], sources['ycentroid'])) apertures = CircularAperture(positions, r=5.) norm = ImageNormalize(stretch=SqrtStretch()) if plot: fig = plt.figure(figsize=(10,10)) ax = fig.gca() ax.imshow(data) apertures.plot(color='blue', lw=1.5, alpha=0.5) plt.axis('off') plt.savefig("quiverMaps/detectedStars_%s.png"%transient_name, bbox_inches='tight') plt.close() except: print("No stars here!", file=f) backx = np.arange(0,data.shape[1]) backy = np.arange(0, data.shape[0]) backxx, backyy = np.meshgrid(backx, backy) #mask invalid values array = np.ma.masked_invalid(data) x1 = backxx[~array.mask] y1 = backyy[~array.mask] newarr = array[~array.mask] data = interpolate.griddata((x1, y1), newarr.ravel(), (backxx, backyy), method='cubic') obj_interp.append(data) #gvar = np.var(obj_interp[0]) #gmean = np.nanmedian(obj_interp[0]) gMax = np.nanmax(obj_interp[0]) g_ZP = g_hdu.header['ZPT_0001'] r_ZP = r_hdu.header['ZPT_0001'] i_ZP = i_hdu.header['ZPT_0001'] #combining into a mean img - # m = -2.5*log10(F) + ZP gmag = -2.5*np.log10(obj_interp[0]) + g_ZP rmag = -2.5*np.log10(obj_interp[1]) + r_ZP imag = -2.5*np.log10(obj_interp[2]) + i_ZP #now the mean can be taken mean_zp = (g_ZP + r_ZP + i_ZP)/3 meanMag = (gmag + rmag + imag)/3 meanImg = 10**((mean_zp-meanMag)/2.5) #convert back to flux #meanImg = (obj_interp[0] + obj_interp[0] + obj_interp[0])/3 print("NanCount = %i"%nancount,file=f) #mean_center = np.nanmean([g_img[int(px/2),int(px/2)], i_img[int(px/2),int(px/2)], i_img[int(px/2),int(px/2)]]) #if mean_center != mean_center: # mean_center = 1.e-30 mean_center = meanImg[int(px/2),int(px/2)] print("Mean_center = %f" % mean_center,file=f) #mean, median, std = sigma_clipped_stats(meanImg, sigma=10.0) meanImg[meanImg != meanImg] = 1.e-30 mean, median, std = sigma_clipped_stats(meanImg, sigma=10.0) print("mean image = %e"% mean, file=f) aboveCount = np.sum(meanImg > 1.) 
aboveCount2 = np.sum(meanImg[int(px/2)-100:int(px/2)+100, int(px/2)-100:int(px/2)+100] > 1.) aboveFrac2= aboveCount2/40000 print("aboveCount = %f"% aboveCount,file=f) print("aboveCount2 = %f "% aboveCount2, file=f) totalPx = px**2 aboveFrac = aboveCount/totalPx print("aboveFrac= %f" % aboveFrac, file=f) print("aboveFrac2 = %f "% aboveFrac2, file=f) #meanImg[meanImg < 1.e-5] = 0 if ((median <15) and (np.round(aboveFrac2, 2) < 0.70)) or ((mean_center > 1.e3) and (np.round(aboveFrac,2) < 0.60) and (np.round(aboveFrac2,2) < 0.75)): bs = 15 fs = 1 if aboveFrac2 < 0.7: step_sizes[int(i)] = 2. else: step_sizes[int(i)] = 10. print("Small filter", file=f) size = 'small' elif ((mean_center > 40) and (median > 500) and (aboveFrac > 0.60)) or ((mean_center > 300) and (aboveFrac2 > 0.7)): bs = 75 #the big sources fs = 3 print("Large filter", file=f) step_sizes[int(i)] = np.max([step_sizes[int(i)], 50]) size = 'large' #if step_sizes[int(i)] == 5: # step_sizes[int(i)] *= 5 # step_sizes[int(i)] = np.min([step_sizes[int(i)], 50]) #if mean_center < 200: #far from the center with a large host # fs = 5 #elif mean_center < 5000: # step_sizes[int(i)] = np.max([step_sizes[int(i)], 50]) #size = 'large' else: bs = 40 #everything in between fs = 3 print("Medium filter", file=f) #if step_sizes[int(i)] == 5: # step_sizes[int(i)] *= 3 # step_sizes[int(i)] = np.max([step_sizes[int(i)], 25]) step_sizes[int(i)] = np.max([step_sizes[int(i)], 15]) size = 'medium' # step_sizes[int(i)] *= 3 #if (median) sigma_clip = SigmaClip(sigma=15.) 
bkg_estimator = MeanBackground() #bkg_estimator = BiweightLocationBackground() bkg3_g = Background2D(g_img, box_size=bs, filter_size=fs, sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) bkg3_r = Background2D(r_img, box_size=bs, filter_size=fs, sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) bkg3_i = Background2D(i_img, box_size=bs, filter_size=fs, sigma_clip=sigma_clip, bkg_estimator=bkg_estimator) #pretend the background is in counts too (I think it is, right?) and average in mags bkg3_g.background[bkg3_g.background < 0] = 1.e-30 bkg3_r.background[bkg3_r.background < 0] = 1.e-30 bkg3_i.background[bkg3_i.background < 0] = 1.e-30 backmag_g = -2.5*np.log10(bkg3_g.background) + g_ZP backmag_r = -2.5*np.log10(bkg3_r.background) + r_ZP backmag_i = -2.5*np.log10(bkg3_i.background) + i_ZP mean_zp = (g_ZP + r_ZP + i_ZP)/3. backmag = 0.333*backmag_g + 0.333*backmag_r + 0.333*backmag_i background = 10**(mean_zp-backmag/2.5) if plot: fig, axs = plt.subplots(1, 3, sharex=True, sharey=True,figsize=(20,10)) axs[0].imshow(bkg3_g.background) axs[0].axis('off') axs[1].imshow(bkg3_r.background) axs[1].axis('off') axs[2].imshow(bkg3_i.background) axs[2].axis('off') plt.savefig("quiverMaps/backgrounds_%s.png" % transient_name, bbox_inches='tight') plt.close() mean, median, std = sigma_clipped_stats(meanImg, sigma=1.0) meanImg[meanImg <= (mean)] = 1.e-30 meanImg[meanImg < 0] = 1.e-30 if plot: fig = plt.figure(figsize=(10,10)) ax = fig.gca() ax.imshow((meanImg)/np.nanmax(meanImg)) plt.axis('off') plt.savefig("quiverMaps/normalizedMeanImage_%s.png" % transient_name, bbox_inches='tight') plt.close() fig = plt.figure(figsize=(10,10)) ax = fig.gca() ax.imshow(background/np.nanmax(background)) plt.axis('off') plt.savefig("quiverMaps/normalizedMeanBackground_%s.png" % transient_name, bbox_inches='tight') plt.close() if nancount > 1.e5: imgWeight = 0 elif (mean_center > 1.e4): #and (size is not 'large'): imgWeight = 0.75 elif size == 'medium': imgWeight = 0.33 else: imgWeight = 0.10 
print("imgWeight= %f"%imgWeight, file=f) fullbackground = ((1-imgWeight)*background/np.nanmax(background) + imgWeight*meanImg/np.nanmax(meanImg))*np.nanmax(background) # background = (0.66*background/np.max(background) + imgWeight*meanImg/np.nanmax(meanImg))*np.max(background) n = px X, Y = np.mgrid[0:n, 0:n] dx, dy = np.gradient(fullbackground.T) n_plot = 10 dx_small = dx[::n_plot, ::n_plot] dy_small = dy[::n_plot, ::n_plot] print("step = %f"% step_sizes[int(i)], file=f) start = [[int(px/2),int(px/2)]] #the center of the grid if True: #if background[int(px/2),int(px/2)] > 0: #if we have some background flux (greater than 3 stdevs away from the median background), follow the gradient start.append(updateStep(px, dx, dy, step_sizes[int(i)], start[-1], size)) for j in np.arange(1.e3): start.append(updateStep(px, dx, dy, step_sizes[int(i)], start[-1], size)) it_array = np.array(start) endPoint = start[-1] if plot: fig = plt.figure(figsize=(10,10)) ax = fig.gca() ax.imshow(fullbackground) plt.axis("off") plt.savefig("quiverMaps/fullBackground_%s.png"%transient_name, bbox_inches='tight') plt.close() coords = wcs.wcs_pix2world(endPoint[0], endPoint[1], 0., ra_dec_order = True) # Note the third argument, set to 0, which indicates whether the pixel coordinates should be treated as starting from (1, 1) (as FITS files do) or from (0, 0) print("Final ra, dec after GD : %f %f"% (coords[0], coords[1]), file=f) col = '#D34E24' col2 = '#B54A24' #lookup by ra, dec try: if size == 'large': a = query_ps1_noname(float(coords[0]), float(coords[1]), 20) else: a = query_ps1_noname(float(coords[0]), float(coords[1]), 5) except TypeError: continue if a: print("Found a host here!", file=f) a = ascii.read(a) a = a.to_pandas() a = a[a['nDetections'] > 1] #a = a[a['ng'] > 1] #a = a[a['primaryDetection'] == 1] smallType = ['AbLS', 'EmLS' , 'EmObj', 'G', 'GammaS', 'GClstr', 'GGroup', 'GPair', 'GTrpl', 'G_Lens', 'IrS', 'PofG', 'RadioS', 'UvES', 'UvS', 'XrayS', '', 'QSO', 'QGroup', 'Q_Lens'] 
medType = ['G', 'IrS', 'PofG', 'RadioS', 'GPair', 'GGroup', 'GClstr', 'EmLS', 'RadioS', 'UvS', 'UvES', ''] largeType = ['G', 'PofG', 'GPair', 'GGroup', 'GClstr'] if len(a) > 0: a = getNEDInfo(a) if (size == 'large'):# and (np.nanmax(a['rKronRad'].values) > 5)): # print("L: picking the largest >5 kronRad host within 10 arcsec", file=f) print("L: picking the closest NED galaxy within 20 arcsec", file=f) #a = a[a['rKronRad'] == np.nanmax(a['rKronRad'].values)] tempA = a[a['NED_type'].isin(largeType)] if len(tempA) > 0: a = tempA tempA = a[a['NED_type'] == 'G'] if len(tempA) > 0: a = tempA #tempA = a[a['NED_mag'] == np.nanmin(a['NED_mag'])] #if len(tempA) > 0: # a = tempA if len(a) > 1: a = a.iloc[[0]] elif (size == 'medium'): #print("M: Picking the largest host within 5 arcsec", file=f) print("M: Picking the closest NED galaxy within 5 arcsec", file=f) #a = a[a['rKronRad'] == np.nanmax(a['rKronRad'].values)] tempA = a[a['NED_type'].isin(medType)] if len(tempA) > 0: a = tempA if len(a) > 1: a = a.iloc[[0]] else: tempA = a[a['NED_type'].isin(smallType)] if len(tempA) > 0: a = tempA a = a.iloc[[0]] print("S: Picking the closest non-stellar source within 5 arcsec", file=f) #else: # f.flush() # continue #threshold = [1, 1, 0, 0, 0, 0] #flag = ['nDetections', 'nr', 'rPlateScale', 'primaryDetection', 'rKronRad', 'rKronFlux'] #j = 0 #while len(a) > 1: # if np.sum(a[flag[int(j)]] > threshold[int(j)]) > 0: # tempA = a[a[flag[int(j)]] > threshold[int(j)]] # j += 1 # a = tempA # if (j
info from LVIS api. """ try: import lvis assert lvis.__version__ >= '10.5.3' from lvis import LVIS except AssertionError: raise AssertionError('Incompatible version of lvis is installed. ' 'Run pip uninstall lvis first. Then run pip ' 'install mmlvis to install open-mmlab forked ' 'lvis. ') except ImportError: raise ImportError('Package lvis is not installed. Please run pip ' 'install mmlvis to install open-mmlab forked ' 'lvis.') self.coco = LVIS(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] if info['file_name'].startswith('COCO'): # Convert form the COCO 2014 file naming convention of # COCO_[train/val/test]2014_000000000000.jpg to the 2017 # naming convention of 000000000000.jpg # (LVIS v1 will fix this naming issue) info['filename'] = info['file_name'][-16:] else: info['filename'] = info['file_name'] data_infos.append(info) return data_infos def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)): """Evaluation in LVIS protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float]): IoU threshold used for evaluating recalls. If set to a list, the average recall of all IoUs will also be computed. Default: 0.5. Returns: dict[str, float]: LVIS style metrics. 
""" try: import lvis assert lvis.__version__ >= '10.5.3' from lvis import LVISResults, LVISEval except AssertionError: raise AssertionError('Incompatible version of lvis is installed. ' 'Run pip uninstall lvis first. Then run pip ' 'install mmlvis to install open-mmlab forked ' 'lvis. ') except ImportError: raise ImportError('Package lvis is not installed. Please run pip ' 'install mmlvis to install open-mmlab forked ' 'lvis.') assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. format(len(results), len(self))) metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError('metric {} is not supported'.format(metric)) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) eval_results = OrderedDict() # get original api lvis_gt = self.coco for metric in metrics: msg = 'Evaluating {}...'.format(metric) if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': ar = self.fast_eval_recall( results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for i, num in enumerate(proposal_nums): eval_results['AR@{}'.<EMAIL>(num)] = ar[i] log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i])) log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue if metric not in result_files: raise KeyError('{} is not in results'.format(metric)) try: lvis_dt = LVISResults(lvis_gt, result_files[metric]) except IndexError: print_log( 'The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break iou_type = 'bbox' if metric == 'proposal' else metric lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type) lvis_eval.params.imgIds = 
self.img_ids if metric == 'proposal': lvis_eval.params.useCats = 0 lvis_eval.params.maxDets = list(proposal_nums) lvis_eval.evaluate() lvis_eval.accumulate() lvis_eval.summarize() for k, v in lvis_eval.get_results().items(): if k.startswith('AR'): val = float('{:.3f}'.format(float(v))) eval_results[k] = val else: lvis_eval.evaluate() lvis_eval.accumulate() lvis_eval.summarize() lvis_results = lvis_eval.get_results() if classwise: # Compute per-category AP # Compute per-category AP # from https://github.com/facebookresearch/detectron2/ precisions = lvis_eval.eval['precision'] # precision: (iou, recall, cls, area range, max dets) assert len(self.cat_ids) == precisions.shape[2] results_per_category = [] for idx, catId in enumerate(self.cat_ids): # area range index 0: all area ranges # max dets index -1: typically 100 per image nm = self.coco.load_cats(catId)[0] precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) num_columns = min(6, len(results_per_category) * 2) results_flatten = list( itertools.chain(*results_per_category)) headers = ['category', 'AP'] * (num_columns // 2) results_2d = itertools.zip_longest(*[ results_flatten[i::num_columns] for i in range(num_columns) ]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) for k, v in lvis_results.items(): if k.startswith('AP'): key = '{}_{}'.format(metric, k) val = float('{:.3f}'.format(float(v))) eval_results[key] = val ap_summary = ' '.join([ '{}:{:.3f}'.format(k, float(v)) for k, v in lvis_results.items() if k.startswith('AP') ]) eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary lvis_eval.print_results() if tmp_dir is not None: tmp_dir.cleanup() return eval_results LVISDataset = LVISV05Dataset DATASETS.register_module(name='LVISDataset', 
module=LVISDataset) @DATASETS.register_module() class LVISV1Dataset(LVISDataset): CLASSES = ( 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach', 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', 'cooking_utensil', 
'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', 'food_processor', 
'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', 'headboard', 'headlight', 'headscarf', 'headset', 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah', 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus',
<reponame>emencia/emencia_paste_djangocms_2 """ .. _buildout: http://www.buildout.org/ .. _virtualenv: http://www.virtualenv.org/ .. _pip: http://www.pip-installer.org .. _Foundation 3: http://foundation.zurb.com/old-docs/f3/ .. _Foundation: http://foundation.zurb.com/ .. _Foundation Orbit: http://foundation.zurb.com/orbit.php .. _modular-scale: https://github.com/scottkellum/modular-scale .. _Compass: http://compass-style.org/ .. _SCSS: http://sass-lang.com/ .. _rvm: http://rvm.io/ .. _Django: https://www.djangoproject.com/ .. _django-admin-tools: https://bitbucket.org/izi/django-admin-tools/ .. _Django CMS: https://www.django-cms.org/ .. _django-assets: http://elsdoerfer.name/docs/django-assets/ .. _django-debug-toolbar: https://github.com/django-debug-toolbar/django-debug-toolbar/ .. _Django Blog Zinnia: https://github.com/Fantomas42/django-blog-zinnia .. _Django CKEditor: https://github.com/divio/djangocms-text-ckeditor/ .. _Django Filebrowser: https://github.com/wardi/django-filebrowser-no-grappelli .. _django-google-tools: https://pypi.python.org/pypi/django-google-tools .. _Django Porticus: https://github.com/emencia/porticus .. _Django PDB: https://github.com/tomchristie/django-pdb .. _Django flatpages app: https://docs.djangoproject.com/en/1.5/ref/contrib/flatpages/ .. _Django sites app: https://docs.djangoproject.com/en/1.5/ref/contrib/sites/ .. _Django reCaptcha: https://github.com/praekelt/django-recaptcha .. _Django registration: https://django-registration.readthedocs.org/en/latest/ .. _CKEditor: http://ckeditor.com/ .. _emencia-cms-snippet: https://github.com/emencia/emencia-cms-snippet .. _Service reCaptcha: http://www.google.com/recaptcha .. _Django Codemirror: https://github.com/sveetch/djangocodemirror .. _django-crispy-forms: https://github.com/maraujop/django-crispy-forms .. _crispy-forms-foundation: https://github.com/sveetch/crispy-forms-foundation .. _emencia-django-slideshows: https://github.com/emencia/emencia-django-slideshows .. 
_emencia-django-staticpages: https://github.com/emencia/emencia-django-staticpages .. _emencia-django-socialaggregator: https://github.com/emencia/emencia-django-socialaggregator .. _django-urls-map: https://github.com/sveetch/django-urls-map .. _Sitemap framework: https://docs.djangoproject.com/en/1.5/ref/contrib/sitemaps/ ******************* DjangoCMS 2.x paste ******************* DjangoCMS projects are created with the many components that are available for use. These components are called **mods** and these mods are already installed and ready to use, but they are not all enabled. You can enable or disable them, as needed. It is always preferable to use the mods system to install new apps. You should never install a new app with `pip`_. If you plan to integrate it into the project, always use the `buildout`_ system. Just open and edit the ``buildout.cfg`` file to add the new egg to be installed. For more details, read the `buildout`_ documentation. **This paste is not really maintained anymore, you should prefer to see for the DjangoCMS 3.x version instead.** Links ===== * Download his `PyPi package <https://pypi.python.org/pypi/emencia_paste_djangocms_2>`_; * Clone it on his `Github repository <https://github.com/emencia/emencia_paste_djangocms_2>`_; Paste ===== This paste will appear with the name ``djangocms-2`` in the paster templates list (with the ``paster create --list-templates`` command). To use this paste to create a new project you will do something like : :: paster create -t djangocms-2 myproject Django ====== django-instance --------------- This is the command installed to replace the ``manage.py`` script in Django. ``django-instance`` is aware of the installed eggs. Paste template version ---------------------- In your projects, you can find from which Paste template they have been builded in the 'project/__init__.py' file where you should find the used package name and its version. 
Note that previously (before the Epaster version 1.8), this file was containing the Epaster version, not the Paste template one, since the package didn't exists yet. How the Mods work ----------------- The advantage of centralizing app configurations in their mods is the project's ``settings.py`` and ``urls.py`` are gathered together in its configuration (cache, smtp, paths, BDD access, etc.). Furthermore, it is easier to enable or disable the apps. To create a new mods, create a directory in ``$PROJECT/mods_avalaible/`` that contains at least one empty ``__init__.py`` and a ``settings.py`` to build the app in the project and potentially its settings. The `settings.py`` and ``urls.py`` files in this directory will be executed automatically by the project (the system loads them after the project ones so that a mods can overwrite the project's initial settings and urls). N.B. With Django's ``runserver`` command, a change to these files does not reload the project instance; you need to relaunch it yourself manually. To enable a new mods, you need to create its symbolic link (**a relative path**) in ``$PROJECT/mods_enabled``. To disable it, simply delete the symbolic link. Compass ======= `Compass`_ is a **Ruby** tool used to compile `SCSS`_ sources in **CSS**. By default, a Django project has its `SCSS`_ sources in the ``compass/scss/`` directory. The CSS `Foundation`_ framework is used as the database. A recent install of Ruby and Compass is required first for this purpose (see `RVM`_ if your system installation is not up to date). Once installed, you can then compile the sources on demand. Simply go to the ``compass/`` directory and launch this command: :: compass compile When you are working uninterruptedly on the sources, you can simply launch the following command: :: compass watch `Compass`_ will monitor the directory of sources and recompile the modified sources automatically. 
By default the ``compass/config.rb`` configuration file (the equivalent of `settings.py`` in Django) is used. If needed, you can create another one and specify it to `Compass`_ in its command (for more details, see the documentation). Foundation ---------- This project embeds `Foundation`_ 5 sources installed from the `Foundation`_ app so you can update it from the sources if needed (and if you have installed the Foundation cli, see its documentation for more details). If you update it, you need to synchronize the updated sources in the project's static files using a command in the Makefile: :: make syncf5 **You only have to do this when you want to synchronize the project's Foundation sources from the latest Foundation release. Commonly this is reserved for Epaster developers.** This will update the Javascript sources in the static files, but make sure that it cleans the directory first. Never put your files in the ``project/webapp_statics/js/foundation5`` directory or they will be deleted. Be aware that the sources update will give you some file prefixed with a dot like ``.gitignore``, you must rename all of them like this ``+dot+gitignore``, yep the dot character have to be renamed to ``+dot+``, else it will cause troubles with GIT and Epaster. There is a python script named ``fix_dotted_filename.py`` in the source directory, use it to automatically apply this renaming. For the `Foundation`_ SCSS sources, no action is required; they are imported directly into the compass config. The project also embeds `Foundation 3`_ sources (they are used for some components in Django administration) but you don't have to worry about them. RVM --- `rvm`_ is somewhat like what `virtualenv`_ is to Python: a virtual environment. The difference is that it is intended for the parallel installation of a number of different versions of **Ruby** without mixing the gems (the **Ruby** application packages). 
In our scenario, it allows you to install a recent version of **Ruby**
without affecting your system installation. This is not required, just a
useful trick to know when developing on a server with an old distribution.

Installation and initial use
============================

Once your project has been created with this epaster template, you need to
install it to use it. The process is simple. Do it in your project
directory: ::

    make install

When it's finished, activate the virtual environment: ::

    source bin/activate

You can then use the project on the development server: ::

    django-instance runserver 0.0.0.0:8001

You will then be able to access it at the following url (where ``127.0.0.1``
will be the server's IP address if you work on a remote machine) :
``http://127.0.0.1:8001/``

The first action required is the creation of a CMS page for the home page and
you must fill in the site name and its domain under
``Administration > Sites > Sites > Add site``.

Available mods
==============

.. document-mods::

Changelogs
==========

Version 1.9.8 - 2015/01/28
--------------------------

* Fix webassets bug: since we use Bundle names with version placeholder,
  webassets needed a manifest file to know what version to use in its
  templatetags. So now a ``webassets.manifest`` file is created in
  ``project/webapp_statics`` directory and will be copied to
  ``project/static`` dir when assets are deployed;

This will be the last maintenance release, don't expect any other update for
this package.
Version 1.9.7 - 2015/01/20 -------------------------- Changing default behavior of *Asset bundles* in ``project/assets.py`` so now bundle urls will be like ``/static/screen.acefe50.css`` instead of old behavior ``/static/screen.min.css?acefe50`` that was causing issue with old proxies caches (see `webassets documentation <http://webassets.readthedocs.org/en/latest/expiring.html#expire-using-the-filename>`_); You can safely backport this change to your old projects, this should be transparent to your install and won't require any server change. Version 1.9.6.1 - 2014/12/26 ---------------------------- * Fix a damned bug with ``bootstrap.py`` that was forcing to upgrade to ``setuptools=0.8`` that seems to results with bad parsing on some constraints like the one from django-cms for ``django-mptt==0.5.2,==0.6,==0.6.1`` that was causing a buildout fail on conflict version. This has been fixed with updating to the last ``bootstrap.py`` and use its command line arguments to fix versions for ``zc.buildout`` and ``setuptools`` in the Makefile; Version 1.9.6 - 2014/11/17 -------------------------- * Mount 500 and 404 page view in urls.py when debug mode is activated; Version 1.9.5 - 2014/11/07 -------------------------- * Update to ``zc.buildout==2.2.5``; * Update to ``buildout.recipe.uwsgi==0.0.24``; * Update to ``collective.recipe.cmd==0.9``; * Update to ``collective.recipe.template==1.11``; * Update to ``djangorecipe==1.10``; * Update to ``porticus==0.8.1``; * Add package ``cmsplugin-porticus==0.1.2`` in buildout config; * Remove dependancy for ``zc.buildout`` and ``zc.recipe.egg``; Version 1.9.4 - 2014/11/02 -------------------------- Update mods doc Version 1.9.3 - 2014/11/01 -------------------------- Fix some
# -*- coding: utf-8 -*- # -------------------------- # Copyright © 2014 - Qentinel Group. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # --------------------------- import math from robot.api import logger from selenium.common.exceptions import NoSuchElementException, \ StaleElementReferenceException, JavascriptException, InvalidSelectorException,\ WebDriverException, NoSuchFrameException import QWeb.internal.frame as frame from QWeb.internal.exceptions import QWebElementNotFoundError, QWebStalingElementError,\ QWebValueError, QWebSearchingMode from QWeb.internal import browser, javascript, util from QWeb.internal.config_defaults import CONFIG ACTIVE_AREA_FUNCTION = None def is_enabled(element): """Is the element interactable? Uses the disabled attribute to determine if form element is enabled or not. Parameters ---------- element : WebElement Returns ------- bool """ disabled = element.get_attribute('disabled') return not bool(disabled) def is_readonly(element): """Is the element interactable? Uses the readonly attribute to determine if form element is enabled or not. Parameters ---------- element : WebElement Returns ------- bool """ return util.par2bool(javascript.execute_javascript( 'return arguments[0].hasAttribute("readonly")', element)) def is_visible(element): """Is the element interactable? Uses the display attribute to determine if form element is visible or not. 
Parameters ---------- element : WebElement Returns ------- bool """ visibility = javascript.execute_javascript('return arguments[0].style.display', element) return bool(visibility.lower() != 'none') def get_closest_element(locator_element, candidate_elements): """Get the closest element in a list of elements to a wanted element. Parameters ---------- locator_element : WebElement candidate_elements : :obj:`list` of :obj:`WebElement` Returns ------- WebElement """ if not candidate_elements: raise QWebElementNotFoundError('No elements visible') closest_element_list = [] closest_distance = 1000000 # Just some large number for candidate_element in candidate_elements: element_info = _list_info(candidate_element) logger.debug("Measuring distance for: {}".format(element_info)) if _overlap(locator_element, candidate_element): logger.debug('Elements overlap, returning this: {}'.format(element_info)) return candidate_element distance = _calculate_closest_distance(locator_element, candidate_element) logger.debug("Candidate {}: distance: {}".format(candidate_element, distance)) if abs(distance - closest_distance) < 2: closest_element_list.append(candidate_element) closest_distance = distance elif distance < closest_distance: closest_distance = distance closest_element_list = [candidate_element] closest_element = _get_closest_ortho_element(locator_element, closest_element_list) logger.debug("Closest distance found is {}".format(closest_distance)) logger.debug("Closest element is: {}".format(_list_info(closest_element))) return closest_element def get_unique_element_by_xpath(xpath, **kwargs): """Get element if it is needed to be unique. One use case is that when xpath is written in the test script with the prefix xpath=. Parameters ---------- xpath : str XPath string. If 'xpath=' -prefix is used, it will be omitted. 
""" if xpath.startswith("xpath="): xpath = xpath.split("=", 1)[1] elements = get_webelements_in_active_area(xpath, **kwargs) # pylint: disable=no-else-return if elements and len(elements) == 1: if CONFIG['SearchMode']: draw_borders(elements[0]) return elements[0] elif not elements: raise QWebElementNotFoundError( 'XPath {} did not find any elements'.format(xpath)) raise QWebValueError('XPath {} matched {} elements. Needs to be unique' .format(xpath, len(elements))) @frame.all_frames def get_webelements(xpath, **kwargs): """Get visible web elements that correspond to given XPath. To check that element is visible it is checked that it has width. This does not handle all cases but it is fast so no need to modify if it works. Replace the visibility check using WebElement's is_displayed method if necessary. Parameters ---------- xpath : str XPath expression without xpath= prefix. Returns ------- :obj:`list` of :obj:`WebElement` List of visible WebElements. """ if xpath.startswith("xpath="): xpath = xpath.split("=", 1)[1] driver = browser.get_current_browser() web_elements = driver.find_elements_by_xpath(xpath) logger.trace("XPath {} matched {} WebElements" .format(xpath, len(web_elements))) web_elements = get_visible_elements_from_elements(web_elements, **kwargs) return web_elements @frame.all_frames def get_webelements_in_active_area(xpath, **kwargs): """Find element under another element. If ${ACTIVE_AREA_FUNC} returns an element then the xpath is searched from that element. Otherwise the element is searched under body element. Parameters ---------- xpath : str Xpath expression without xpath= prefix. Returns ------- :obj:`list` of :obj:`WebElement` List of visible WebElements. 
""" active_area_xpath = CONFIG["ActiveAreaXpath"] if ACTIVE_AREA_FUNCTION is not None: active_area = ACTIVE_AREA_FUNCTION() if active_area: xpath = xpath.replace('//', './/', 1) else: driver = browser.get_current_browser() active_area = driver.find_element_by_xpath(active_area_xpath) else: driver = browser.get_current_browser() try: active_area = driver.find_element_by_xpath(active_area_xpath) if active_area is None: logger.debug('Got None for active area. Is page still loading ' 'or is it missing body tag?') return None # //body not found, is page still loading? Return None to continue looping except NoSuchElementException: logger.debug("Cannot locate //body element. Is page still loading?") return None try: webelements = active_area.find_elements_by_xpath(xpath) logger.trace('XPath {} matched {} webelements' .format(xpath, len(webelements))) webelements = get_visible_elements_from_elements(webelements, **kwargs) except StaleElementReferenceException: raise QWebStalingElementError('Got StaleElementException') except (JavascriptException, InvalidSelectorException) as e: logger.debug('Got {}, returning None'.format(e)) webelements = None return webelements def get_visible_elements_from_elements(web_elements, **kwargs): visible_elements = [] hiding_elements = [] vis_check = util.par2bool(kwargs.get('visibility', CONFIG['Visibility'])) if not vis_check: logger.debug('allow invisible elements') return web_elements viewport_check = util.par2bool(kwargs.get('viewport', CONFIG['InViewport'])) try: elem_objects = javascript.get_visibility(web_elements) logger.debug('Checking visibility from all found elements: {}'.format(len(elem_objects))) except (JavascriptException, StaleElementReferenceException, TypeError) as e: raise QWebStalingElementError("Exception from visibility check: {}".format(e)) for el in elem_objects: onscreen = el.get('viewport') logger.debug('Is element in viewport: {}'.format(onscreen)) css_visibility = el.get('css') logger.debug('CSS visibility is not 
hidden and ' 'display is not none: {}'.format(css_visibility)) offset = el.get('offset') logger.debug('Element offsetWidth is > 0: {}'.format(offset)) if css_visibility and onscreen: if util.par2bool(kwargs.get('offset', CONFIG['OffsetCheck'])): if offset and onscreen: visible_elements.append(el.get('elem')) elif offset: hiding_elements.append(el.get('elem')) elif onscreen: visible_elements.append(el.get('elem')) else: hiding_elements.append(el.get('elem')) elif css_visibility: hiding_elements.append(el.get('elem')) logger.debug('found {} visible elements and {} hiding ones' .format(len(visible_elements), len(hiding_elements))) if viewport_check: return visible_elements return visible_elements + hiding_elements def draw_borders(elements): mode = CONFIG['SearchMode'] if not isinstance(elements, list): elements = [elements] for e in elements: if mode.lower() == 'debug': javascript.highlight_element(e, False) raise QWebSearchingMode('Element highlighted') if mode.lower() == 'draw': javascript.highlight_element(e, True) elif mode.lower() == 'flash': javascript.highlight_element(e, False, True) def _calculate_closest_distance(element1, element2): """Calculate closest distance between elements in pixel units. Gets corners' locations for both elements and use them to calculate the closest distance between the elements. Uses Manhattan distance. 
Parameters ---------- element1 : WebElement element2 : WebElement Returns ------- float """ search_direction = CONFIG["SearchDirection"] corners_locations1 = _get_corners_locations(element1) corners_locations2 = _get_corners_locations(element2) closest_distance = 1000000 # Some large number for corner1 in corners_locations1: for corner2 in corners_locations2: distance = _manhattan_distance(corner1['x'], corner1['y'], corner2['x'], corner2['y']) if search_direction != 'closest': # y coordinate goes up downwards on page # small y is above angle = math.degrees(math.atan2(corner2['y'] - corner1['y'], corner2['x'] - corner1['x'])) if search_direction == 'down': if not 5 < angle < 175: logger.debug('Search direction is {} and element is not in arc'. format(search_direction)) distance = 1000000 elif search_direction == 'up': if not -175 < angle < -5: logger.debug('Search direction is {} and element is not in arc'. format(search_direction)) distance = 1000000 elif search_direction == 'left': if not abs(angle) > 95: logger.debug('Search direction is {} and element is not in arc'. format(search_direction)) distance = 1000000 elif search_direction == 'right': if not -85 < angle < 85: logger.debug('Search direction is {} and element is not in arc'. format(search_direction)) distance = 1000000 if closest_distance > distance > 0: closest_distance = distance return closest_distance def _calculate_closest_ortho_distance(element1, element2): """Returns shortest ortho distance between locator and candidate element centers Parameters ---------- element1 : WebElement element2 : WebElement Returns ------- float """ center_1 = _get_center_location(element1) center_2 = _get_center_location(element2) distance_h = abs(center_1['x'] - center_2['x']) distance_v = abs(center_1['y'] - center_2['y']) return min(distance_h, distance_v) def _get_center_location(element): """ Calculate rectangle's center locations Each element on a web page is in a rectangle. 
Uses the WebElement's location and size attributes to get center. Parameters ---------- element : WebElement Returns ------- tuple A tuple with 2 elements: center x and y coordinates. """ location = element.location size = element.size center = {'x': location['x'] + (size['width'] / 2), 'y': location['y'] + (size['height'] / 2)} return center def _get_corners_locations(element): """Calculate rectangle's corners' locations Each element on a web page is in a rectangle. Uses the WebElement's location and size attributes to get all corners. Parameters ---------- element : WebElement Returns ------- tuple A tuple with 4 elements: top left corner, top right corner, bottom left corner, bottom right corner. """ location = element.location size = element.size top_left_corner = {'x': location['x'], 'y': location['y']} top_right_corner = {'x': location['x'] + size['width'], 'y': location['y']} bottom_left_corner = {'x': location['x'], 'y': location['y'] + size['height']} bottom_right_corner = {'x': top_right_corner['x'], 'y': bottom_left_corner['y']} corners_locations = (top_left_corner, top_right_corner, bottom_left_corner, bottom_right_corner) return corners_locations def _manhattan_distance(x0, y0, x1, y1): """Get manhattan distance between points (x0, y0) and (x1, y1).""" return abs(x0 - x1) + abs(y0 -
ipv6_capture_hostname: Determines if the IPv6 host name and lease time is captured or not while assigning a fixed address. ipv6_ddns_domainname: The Grid-level DDNS domain name value. ipv6_ddns_enable_option_fqdn: Controls whether the FQDN option sent by the client is to be used, or if the server can automatically generate the FQDN. ipv6_ddns_server_always_updates: Determines if the server always updates DNS or updates only if requested by the client. ipv6_ddns_ttl: The Grid-level IPv6 DDNS TTL value. ipv6_default_prefix: The Grid-level IPv6 default prefix. ipv6_dns_update_style: The update style for dynamic DHCPv6 DNS updates. ipv6_domain_name: The IPv6 domain name. ipv6_domain_name_servers: The comma separated list of domain name server addresses in IPv6 address format. ipv6_enable_ddns: Determines if sending DDNS updates by the DHCPv6 server is enabled or not. ipv6_enable_gss_tsig: Determines whether the all appliances are enabled to receive GSS-TSIG authenticated updates from DHCPv6 clients. ipv6_enable_lease_scavenging: Indicates whether DHCPv6 lease scavenging is enabled or disabled. ipv6_enable_retry_updates: Determines if the DHCPv6 server retries failed dynamic DNS updates or not. ipv6_generate_hostname: Determines if the server generates the hostname if it is not sent by the client. ipv6_gss_tsig_keys: The list of GSS-TSIG keys for a Grid DHCPv6 object. ipv6_kdc_server: The IPv6 address or FQDN of the Kerberos server for DHCPv6 GSS-TSIG authentication. ipv6_lease_scavenging_time: The Grid-level grace period (in seconds) to keep an expired lease before it is deleted by the scavenging process. ipv6_microsoft_code_page: The Grid-level Microsoft client DHCP IPv6 code page value. This value is the hostname translation code page for Microsoft DHCP IPv6 clients. ipv6_options: An array of DHCP option structs that lists the DHCPv6 options associated with the object. ipv6_prefixes: The Grid-level list of IPv6 prefixes. 
ipv6_recycle_leases: Determines if the IPv6 recycle leases feature is enabled or not. If the feature is enabled, leases are kept in the Recycle Bin until one week after expiration. When the feature is disabled, the leases are irrecoverably deleted. ipv6_remember_expired_client_association: Enable binding for expired DHCPv6 leases. ipv6_retry_updates_interval: Determines the retry interval when the member DHCPv6 server makes repeated attempts to send DDNS updates to a DNS server. ipv6_txt_record_handling: The Grid-level TXT record handling value. This value specifies how DHCPv6 should treat the TXT records when performing DNS updates. ipv6_update_dns_on_lease_renewal: Controls whether the DHCPv6 server updates DNS when an IPv6 DHCP lease is renewed. kdc_server: The IPv4 address or FQDN of the Kerberos server for DHCPv4 GSS-TSIG authentication. lease_logging_member: The Grid member on which you want to store the DHCP lease history log. Infoblox recommends that you dedicate a member other than the master as a logging member. If possible, use this member solely for storing the DHCP lease history log. If you do not select a member, no logging can occur. lease_per_client_settings: Defines how the appliance releases DHCP leases. Valid values are "RELEASE_MACHING_ID", "NEVER_RELEASE", or "ONE_LEASE_PER_CLIENT". The default is "RELEASE_MATCHING_ID". lease_scavenge_time: Determines the lease scavenging time value. When this field is set, the appliance permanently deletes the free and backup leases, that remain in the database beyond a specified period of time.To disable lease scavenging, set the parameter to -1. The minimum positive value must be greater than 86400 seconds (1 day). log_lease_events: This value specifies whether the Grid DHCP members log lease events is enabled or not. logic_filter_rules: This field contains the logic filters to be applied on the Infoblox Grid.This list corresponds to the match rules that are written to the dhcpd configuration file. 
low_water_mark: Determines the low watermark value. If the percent of allocated addresses drops below this watermark, the appliance makes a syslog entry and if enabled, sends an e-mail notification. low_water_mark_reset: Determines the low watermark reset value.If the percentage of allocated addresses exceeds this value, a corresponding SNMP trap is reset.A number that specifies the percentage of allocated addresses. The range is from 1 to 100. The low watermark reset value must be higher than the low watermark value. microsoft_code_page: The Microsoft client DHCP IPv4 code page value of a Grid. This value is the hostname translation code page for Microsoft DHCP IPv4 clients. nextserver: The next server value of a DHCP server. This value is the IP address or name of the boot file server on which the boot file is stored. option60_match_rules: The list of option 60 match rules. options: An array of DHCP option structs that lists the DHCP options associated with the object. Note that WAPI does not return special options 'routers', 'domain-name-servers', 'domain-name' and 'broadcast-address' with empty values for this object. ping_count: Specifies the number of pings that the Infoblox appliance sends to an IP address to verify that it is not in use. Values are range is from 0 to 10, where 0 disables pings. ping_timeout: Indicates the number of milliseconds the appliance waits for a response to its ping.Valid values are 100, 500, 1000, 2000, 3000, 4000 and 5000 milliseconds. preferred_lifetime: The preferred lifetime value. prefix_length_mode: The Prefix length mode for DHCPv6. protocol_hostname_rewrite_policies: The list of hostname rewrite policies. pxe_lease_time: Specifies the duration of time it takes a host to connect to a boot server, such as a TFTP server, and download the file it needs to boot.A 32-bit unsigned integer that represents the duration, in seconds, for which the update is cached. Zero indicates that the update is not cached. 
recycle_leases: Determines if the recycle leases feature is enabled or not. If you enabled this feature, and then delete a DHCP range, the appliance stores active leases from this range up to one week after the leases expires. restart_setting: The restart setting. retry_ddns_updates: Indicates whether the DHCP server makes repeated attempts to send DDNS updates to a DNS server. syslog_facility: The syslog facility is the location on the syslog server to which you want to sort the syslog messages. txt_record_handling: The Grid-level TXT record handling value. This value specifies how DHCP should treat the TXT records when performing DNS updates. update_dns_on_lease_renewal: Controls whether the DHCP server updates DNS when a DHCP lease is renewed. valid_lifetime: The valid lifetime for the Grid members. """ _infoblox_type = 'grid:dhcpproperties' _fields = ['authority', 'bootfile', 'bootserver', 'capture_hostname', 'ddns_domainname', 'ddns_generate_hostname', 'ddns_retry_interval', 'ddns_server_always_updates', 'ddns_ttl', 'ddns_update_fixed_addresses', 'ddns_use_option81', 'deny_bootp', 'disable_all_nac_filters', 'dns_update_style', 'email_list', 'enable_ddns', 'enable_dhcp_thresholds', 'enable_email_warnings', 'enable_fingerprint', 'enable_gss_tsig', 'enable_hostname_rewrite', 'enable_leasequery', 'enable_roaming_hosts', 'enable_snmp_warnings', 'format_log_option_82', 'grid', 'gss_tsig_keys', 'high_water_mark', 'high_water_mark_reset', 'hostname_rewrite_policy', 'ignore_dhcp_option_list_request', 'ignore_id', 'ignore_mac_addresses', 'immediate_fa_configuration', 'ipv6_capture_hostname', 'ipv6_ddns_domainname', 'ipv6_ddns_enable_option_fqdn', 'ipv6_ddns_server_always_updates', 'ipv6_ddns_ttl', 'ipv6_default_prefix', 'ipv6_dns_update_style', 'ipv6_domain_name', 'ipv6_domain_name_servers', 'ipv6_enable_ddns', 'ipv6_enable_gss_tsig', 'ipv6_enable_lease_scavenging', 'ipv6_enable_retry_updates', 'ipv6_generate_hostname', 'ipv6_gss_tsig_keys', 'ipv6_kdc_server', 
'ipv6_lease_scavenging_time', 'ipv6_microsoft_code_page', 'ipv6_options', 'ipv6_prefixes', 'ipv6_recycle_leases', 'ipv6_remember_expired_client_association', 'ipv6_retry_updates_interval', 'ipv6_txt_record_handling', 'ipv6_update_dns_on_lease_renewal', 'kdc_server', 'lease_logging_member', 'lease_per_client_settings', 'lease_scavenge_time', 'log_lease_events', 'logic_filter_rules', 'low_water_mark', 'low_water_mark_reset', 'microsoft_code_page', 'nextserver', 'option60_match_rules', 'options', 'ping_count', 'ping_timeout', 'preferred_lifetime', 'prefix_length_mode', 'protocol_hostname_rewrite_policies', 'pxe_lease_time', 'recycle_leases', 'restart_setting', 'retry_ddns_updates', 'syslog_facility', 'txt_record_handling', 'update_dns_on_lease_renewal', 'valid_lifetime'] _search_for_update_fields = ['grid'] _updateable_search_fields = [] _all_searchable_fields = ['grid'] _return_fields = ['disable_all_nac_filters', 'grid'] _remap = {} _shadow_fields = ['_ref'] _custom_field_processing = { 'ipv6_options': Dhcpoption.from_dict, 'logic_filter_rules': Logicfilterrule.from_dict, 'option60_match_rules': Option60Matchrule.from_dict, 'options': Dhcpoption.from_dict, } class GridDns(InfobloxObject): """ GridDns: Grid DNS properties object. Corresponds to WAPI object 'grid:dns' This object supports DNS service management and configuration such as time-to-live (TTL) settings, zone transfers, queries, root name servers, dynamic updates, sort lists, Transaction Signatures (TSIG) for DNS and others, all at the grid level. The service configurations of a grid are inherited by all members, zones, and networks unless you specifically override them for selected members, zones, and networks. For this reason, it is recommended that you configure services at the grid level before configuring member, zone and network services. Fields: add_client_ip_mac_options: Add custom IP, MAC and DNS View name ENDS0 options to outgoing recursive queries. 
allow_bulkhost_ddns: Determines if DDNS bulk host is allowed or not. allow_gss_tsig_zone_updates: Determines whether GSS-TSIG zone update is enabled for all Grid members. allow_query: Determines if queries from the specified IPv4 or IPv6 addresses and networks are allowed or not. The appliance can also use Transaction Signature (TSIG) keys to authenticate the queries. allow_recursive_query: Determines if the responses to recursive queries are enabled or not. allow_transfer: Determines if zone transfers
words 1/quantile_error_scale is how much error is ok as a fraction of the bin size be cognizant of ntile, and this value, as passing a small relativeError can increase compute time dramatically defaults to 5 sample_size: Optional[int] size of sample used to calculate quantile bin boundaries no sampling if None, not recommended defauts to 10**5 Returns ------- df Explanation of anonymous return value of type ``type``. match_info : dict contains scale and dropped scale describes what proportion of the treatment group was used and dropped describes what proportion of the treatment group, after scaling, was dropped due to inadequate control candidates Explanation Raises ------ UncaughtExceptions See Also -------- _make_quantile_match_col _execute_quantile_match Notes ----- """ logging.getLogger(__name__).info("starting _quantile_match with args ntile={ntile}, quantile_error_scale={qes}, /" "sample_size={sn}".format(ntile=ntile, qes=quantile_error_scale, sn=sample_size)) label_col = prob_mod.getOrDefault('labelCol') df, match_col = _make_quantile_match_col(df, metric_col, label_col, ntile, quantile_error_scale, sample_size) df, match_info = _execute_quantile_match(df, match_col, label_col) match_info['type'] = 'quantile' return df, match_info @_time_log def _make_quantile_match_col(df: DataFrame, metric_col: str, label_col: str, ntile: int, quantile_error_scale: Optional[Union[int, float]], sample_size: Optional[int]) -> Tuple[DataFrame, str]: r"""bin probability column and return it to be matched Parameters ---------- df : pyspark.sql.DataFrame metric_col : str name of col to be matched ntile : int how many buckets to make out of the metric col and then stratify sample defaults to 10 quantile_error_scale: Union[int, float] error tolerance for calculating boundries for ntiles relativeError passed to approxQuantile is calculated as 1/ntile/quantile_error_scale in other words 1/quantile_error_scale is how much error is ok as a fraction of the bin size be cognizant of 
ntile, and this value, as passing a small relativeError can increase compute time dramatically sample_size: Optional[int] size of sample used to calculate quantile bin boundaries no sampling if None, not recommended Returns ------- df : pyspark.sql.DataFrame input df but with `match_col` match_col : type colname of col to be matched Explanation Raises ------ UncaughtException See Also -------- _quantile_match: calls it, sets default args for `ntile`, `sample_size`, and `quantile_error_scale` """ t_df = df.where(F.col(label_col) == 1) _persist_if_unpersisted(t_df) t_sample_df = _sample_df(df=t_df, sample_size=sample_size) _persist_if_unpersisted(t_sample_df) # create thresholds for ntiles, convert to native float from numpy float for use w/ pyspark probs = [float(x) for x in np.linspace(start=0, stop=1, num=ntile, endpoint=False)][1:] quantile_error_tolerance = 1/ntile/quantile_error_scale threshs = t_sample_df.approxQuantile(col=metric_col, probabilities=probs, relativeError=quantile_error_tolerance) def make_udf(threshs): return F.udf(lambda x: sum([x > y for y in threshs]), T.ShortType()) # add 1 to conform to mathematical indexing of ntiling _persist_if_unpersisted(df) match_col = "quantile_match_col_{metric_col}".format(metric_col=metric_col) df = df.withColumn(match_col, make_udf(threshs)(F.col(metric_col)) + 1) return df, match_col @_time_log def _execute_quantile_match(df: DataFrame, match_col: str, label_col: str) ->Tuple[DataFrame, dict]: r"""stratified sample and return matched populations & match_info _calc_sample_fracs handles logic for scale/drop Parameters ---------- df : pyspark.sql.DataFrame match_col : str label_col : str Returns ------- df : pyspark.sql.DataFrame match_info : dict Raises ------ Uncaught Exceptions See Also -------- _calc_sample_fracs _sample_dfs Notes ----- """ t_df = df.where(F.col(label_col) == 1) c_can_df = df.where(F.col(label_col) == 0) t_fracs, c_fracs, scaled, dropped = _calc_sample_fracs(t_df, c_can_df, match_col) t_out, 
c_out = _sample_dfs(t_df, t_fracs, c_can_df, c_fracs, match_col) df = t_out.union(c_out.select(t_out.columns)) match_info = {'scaled': scaled, 'dropped': dropped} return df, match_info @_time_log def _calc_sample_fracs(t_df: DataFrame, c_can_df: DataFrame, match_col: str) -> Tuple[pd.DataFrame, pd.DataFrame, float, float]: r"""given treatment and control_candidates, calculate optimal stratified sample fractions for balance population `scale` is using fewer treatment all across the board, while `drop` is dropping specific treatments without adequate controls. _calc_sample_fracs tries to balance sample size (decreased by scale) and validity (decreased by unbalanced dropping) Parameters ---------- t_df : pyspark.sql.DataFrame the treatment group c_can_df : pyspark.sqlDataFrame the control candidates match_col : str binned col to be matched Returns ------- pd.DataFrame the sample fractions for the treatment group w/ `match_col` and fraction columns pd.DataFrame the sample fractions for the control group. 
float the scale float the drop Raises ------ UncaughtExceptions See Also -------- _calc_optimal_subset Notes ----- """ _persist_if_unpersisted(t_df) _persist_if_unpersisted(c_can_df) t_counts = t_df.groupby(match_col).count().withColumnRenamed('count', 'treatment') c_can_counts = c_can_df.groupby(match_col).count().withColumnRenamed('count', 'control') fracs = t_counts.join(c_can_counts, on=[match_col]) fracs = fracs.toPandas() sample_fracs, scale, drop = _calc_optimal_subset(fracs=fracs, match_col=match_col) logging.getLogger(__name__).info("scale = {scale:.2f} drop: = {drop:.2f}".format(scale=scale, drop=drop)) return sample_fracs[[match_col, 'treatment_scaled_sample_fraction']],\ sample_fracs[[match_col, 'control_scaled_sample_fraction']],\ scale, drop @_time_log def _calc_optimal_subset(fracs: pd.DataFrame, match_col: str) -> Tuple[pd.DataFrame, float, float]: r""" return best sample fractions for given population iterate over range of possible scales and drops & assign utility return one of the fraction with max utility Parameters ---------- fracs : pd.DataFrame columns `match_col` and 'treatment' and 'control' ( match_col : str Returns ------- pd.DataFrame columns `match_col`, 'treatment_scaled_sample_fraction', 'control_scaled_sample_fraction' Raises ------ UncaughtExceptions See Also -------- _calc_sample_fracs _create_options_grid Notes ----- """ fracs = fracs.copy(deep=True) fracs['control_sample_fraction_naive'] = fracs['treatment']/fracs['control'] scale_factor = fracs.control_sample_fraction_naive.max()**-1 logging.getLogger(__name__).info("scale factor is {scale_factor:.2f} (coeffs for treatment w/ no drops".format(scale_factor=scale_factor)) # if no subscaling is necessary return fracs as is if scale_factor >= 1: logging.getLogger(__name__).info("can use all treatments safely, returning early") fracs['control_scaled_sample_fraction'] = fracs['control_sample_fraction_naive'] fracs['treatment_scaled_sample_fraction'] = 1 fracs = fracs[[match_col, 
'treatment_scaled_sample_fraction', 'control_scaled_sample_fraction']] return fracs, float(1), float(0) options = _create_options_grid(fracs, scale_factor) options['utility'] = options.apply(_calc_util_wrapper, axis=1) # pick best max_util = options.utility.max() best_row = options[options.utility == max_util].iloc[0] winning_scale = float(best_row['scale']) winning_drop = float(best_row['percent_dropped']) logging.getLogger(__name__).info("max_util:{mu:.2f}\twinning_scale:{ws:.2f}\twinning_drop:{wd:.2f}".format(mu=max_util, ws=winning_scale, wd=winning_drop)) fracs['control_scaled_sample_fraction'] = np.min([(fracs['treatment'] * winning_scale/fracs['control']).values, [1]*len(fracs)], axis=0) fracs['treatment_scaled_sample_fraction'] = fracs['control_scaled_sample_fraction'] * fracs['control']/fracs['treatment'] fracs = fracs[[match_col, 'treatment_scaled_sample_fraction', 'control_scaled_sample_fraction']] return fracs, winning_scale, winning_drop @_time_log def _create_options_grid(fracs: pd.DataFrame, scale_factor: float) -> pd.DataFrame: r"""create 100 scale options & calc drop Parameters ---------- fracs: pd.DataFrame Array_like means all those objects -- lists, nested lists, etc. -- that can be converted to an array. We can also refer to variables like `var1`. scale_factor: float the multiplication factor if no treatment were dropped (e.g. 
the fraction of treatment at which dropping is no longer neccesary because the number of control candidates at each match col bucket is greater than or equal to the treatments) Returns ------- fracs : pd.DataFrame columns 'scale', 'percent_dropped', and 'number' (count of treatment) Raises ------ UncaughtExceptions """ fracs = fracs.copy(deep=True) scales = np.linspace(1, scale_factor, num=100, endpoint=True) options = pd.DataFrame(columns=['scale', 'percent_dropped', 'number']) for scale in scales: # calc new frac samples, maximum of 1 fracs['control_scaled_sample_fraction'] = np.min([(fracs['treatment'] * scale/fracs['control']).values, [1]*len(fracs)], axis=0) fracs['treatment_scaled_sample_fraction'] = fracs['control_scaled_sample_fraction'] * fracs['control']/fracs['treatment'] # calc %drop as difference of scale and actual ( e.g. where we pinned max at 1 in control scaled sample fraction) num_dropped = (fracs['treatment'] * (np.array([scale] * len(fracs)) - fracs['treatment_scaled_sample_fraction'])).sum() percent_dropped = num_dropped/(fracs['treatment'] * scale).sum() # calc new total number = (fracs['treatment']*fracs['treatment_scaled_sample_fraction']).sum() options = options.append({'scale': scale, 'percent_dropped': percent_dropped, 'number': number}, ignore_index=True) return options def _calc_util_wrapper(row): return _calc_util(row['number'], row['percent_dropped']) def _calc_util(number, dropped): # base utility log_value = math.log10(number/1000 + 1) # additional incentive to hit certain thresholds - chosen due to implementation factors threshold_boost = _logistic_function(L=math.log10(number / UTIL_BOOST_THRESH_1 + 1) / 10, x=number, x0=UTIL_BOOST_THRESH_1) \ + _logistic_function(L=math.log10(number / UTIL_BOOST_THRESH_2 + 1) / 10, x=number, x0=UTIL_BOOST_THRESH_2) \ + _logistic_function(L=math.log10(number / UTIL_BOOST_THRESH_3 + 1) / 10, x=number, x0=UTIL_BOOST_THRESH_3) # discount for unbalanced dropping dropped_penalty = 
1-min(math.exp(dropped)-1, 1) utility = dropped_penalty * (log_value + threshold_boost) return utility def _logistic_function(x, L, k=1, x0=0): try: return L / (1 + math.exp(-k * (x - x0))) except OverflowError: if x >= x0: return L if x < x0: return 0 @_time_log def _sample_dfs(t_df: pyspark.sql.DataFrame, t_fracs: pd.DataFrame, c_can_df: pyspark.sql.DataFrame, c_fracs: pd.DataFrame, match_col: str) ->Tuple[DataFrame, DataFrame]: r"""given treatment and control pops and their stratified sample fracs, return balanced pops Parameters ---------- t_df : pyspark.DataFrame treatment pop t_fracs: pd.DataFrame with columns `match_col` and 'treatment_scaled_sample_fraction' c_can_df : pyspark.DataFrame control can pop c_fracs : pd.DataFrame with columns `match_col` and control_scaled_sample_fraction Returns ------- t_out : pyspark.sql.DataFrame c_out : pyspark.sql.DataFrame Raises ------ UncaughtExceptions """ _persist_if_unpersisted(t_df) _persist_if_unpersisted(c_can_df) t_fracs = t_fracs.set_index(match_col).treatment_scaled_sample_fraction.to_dict() t_dict = {} for key, value in t_fracs.items(): t_dict[int(key)] = min(float(value), 1) t_out = t_df.sampleBy(col=match_col, fractions=t_dict, seed=42) c_fracs = c_fracs.set_index(match_col).control_scaled_sample_fraction.to_dict() c_dict = {} for key, value in c_fracs.items(): c_dict[int(key)] = float(value) c_out = c_can_df.sampleBy(col=match_col, fractions=c_dict, seed=42) return t_out, c_out @_time_log def _assignment_match(df: DataFrame, prob_mod: mlc.Model, metric_col: str) ->Tuple[DataFrame, dict]: r"""match treatment to controls 1:1 Use Hungarian/Munkres algorithm in `metric_col` (typically probability) to find controls for your treatments with the least cost - the distance between a treatment's metric and its control's metric Parameters ---------- df: DataFrame dataframe in question, must have input columns specified by prob_mod prob_mod: mlc.Model propenisty predicting model. used
from typing import Tuple import pytest from more_itertools import only from adam.ontology.phase2_ontology import gravitationally_aligned_axis_is_largest from adam.axes import HorizontalAxisOfObject, FacingAddresseeAxis, AxesInfo from adam.language_specific.english.english_language_generator import ( PREFER_DITRANSITIVE, SimpleRuleBasedEnglishLanguageGenerator, USE_ADVERBIAL_PATH_MODIFIER, ATTRIBUTES_AS_X_IS_Y, IGNORE_COLORS, USE_ABOVE_BELOW, USE_NEAR, ) from adam.language_specific.english.english_phase_1_lexicon import ( GAILA_PHASE_1_ENGLISH_LEXICON, ) from adam.ontology import IN_REGION, IS_SPEAKER, IS_ADDRESSEE from adam.ontology.during import DuringAction from adam.ontology.phase1_ontology import ( AGENT, BOOK, BABY, TRUCK, BALL, BIRD, BOX, CHAIR, COOKIE, CUP, DAD, DRINK, DRINK_CONTAINER_AUX, EAT, FALL, FLY, GAILA_PHASE_1_ONTOLOGY, GIVE, GOAL, GREEN, GROUND, HAS, JUICE, MOM, PATIENT, PUSH, PUT, ROLL, SIT, TABLE, THEME, THROW, WATER, on, strictly_above, JUMP, JUMP_INITIAL_SUPPORTER_AUX, DOG, HOLLOW, GO, LEARNER, near, TAKE, CAR, ROLL_SURFACE_AUXILIARY, has, bigger_than, RED, BLACK, far, negate, WALK, HARD_FORCE, PASS, WALK_SURFACE_AUXILIARY, FAST, SLOW, ) from adam.ontology.phase1_spatial_relations import ( AWAY_FROM, DISTAL, EXTERIOR_BUT_IN_CONTACT, GRAVITATIONAL_DOWN, GRAVITATIONAL_UP, INTERIOR, Region, SpatialPath, Direction, PROXIMAL, VIA, TOWARD, ) from adam.random_utils import FixedIndexChooser from adam.relation import Relation, flatten_relations from adam.situation import Action, SituationObject from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation from adam_test_utils import situation_object from tests.sample_situations import make_bird_flies_over_a_house from tests.situation.situation_test import make_mom_put_ball_on_table _SIMPLE_GENERATOR = SimpleRuleBasedEnglishLanguageGenerator( ontology_lexicon=GAILA_PHASE_1_ENGLISH_LEXICON ) def test_common_noun(): situation = HighLevelSemanticsSituation( 
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[situation_object(BALL)]
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("a", "ball")


def test_mass_noun():
    # Mass nouns ("water") are generated with no article.
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[situation_object(WATER)]
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("water",)


def test_proper_noun():
    # Proper nouns ("Mom") are generated with no article and keep capitalization.
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[situation_object(MOM)]
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("Mom",)


def test_one_object():
    # A single count noun gets the indefinite article.
    box = situation_object(BOX)
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[box]
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("a", "box")


def test_two_objects():
    # Two identical objects: "two" plus the noun and a separate plural token.
    box_1 = situation_object(BOX, debug_handle="box_0")
    box_2 = situation_object(BOX, debug_handle="box_1")
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[box_1, box_2]
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("two", "box", "s")


def test_two_objects_with_dad():
    # Only salient objects are verbalized (table_2 is in other_objects and is
    # omitted); the PROXIMAL relation along the table's horizontal axis
    # surfaces as "beside".
    table_1 = situation_object(TABLE, debug_handle="table_0")
    table_2 = situation_object(TABLE, debug_handle="table_1")
    dad = situation_object(DAD, debug_handle="dad")
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=[table_1, dad],
        other_objects=[table_2],
        always_relations=[
            Relation(
                IN_REGION,
                dad,
                Region(
                    table_1,
                    distance=PROXIMAL,
                    direction=Direction(
                        positive=True,
                        relative_to_axis=HorizontalAxisOfObject(table_1, index=0),
                    ),
                ),
            )
        ],
    )
    assert only(
        _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0))
    ).as_token_sequence() == ("Dad", "beside", "a", "table")
def test_many_objects(): ball_1 = situation_object(BALL, debug_handle="ball_0") ball_2 = situation_object(BALL, debug_handle="ball_1") ball_3 = situation_object(BALL, debug_handle="ball_2") situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[ball_1, ball_2, ball_3] ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("many", "ball", "s") def test_simple_verb(): mom = situation_object(MOM) table = situation_object(TABLE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, table], actions=[ Action( action_type=PUSH, argument_roles_to_fillers=[(AGENT, mom), (THEME, table)] ) ], ) # TODO: address morphology to capture verb conjugation here assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Mom", "pushes", "a", "table") def test_mom_put_a_ball_on_a_table(): situation = make_mom_put_ball_on_table() assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Mom", "puts", "a", "ball", "on", "a", "table") def test_mom_put_a_ball_on_a_table_using_i(): mom = situation_object(ontology_node=MOM, properties=[IS_SPEAKER]) ball = situation_object(ontology_node=BALL) table = situation_object(ontology_node=TABLE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, ball, table], actions=[ Action( PUT, ( (AGENT, mom), (THEME, ball), ( GOAL, Region( reference_object=table, distance=EXTERIOR_BUT_IN_CONTACT, direction=GRAVITATIONAL_UP, ), ), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("I", "put", "a", "ball", "on", "a", "table") def test_mom_put_a_ball_on_a_table_using_you(): mom = situation_object(ontology_node=MOM, properties=[IS_ADDRESSEE]) ball = situation_object(ontology_node=BALL) table = 
situation_object(ontology_node=TABLE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, ball, table], actions=[ Action( PUT, ( (AGENT, mom), (THEME, ball), ( GOAL, Region( reference_object=table, distance=EXTERIOR_BUT_IN_CONTACT, direction=GRAVITATIONAL_UP, ), ), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("you", "put", "a", "ball", "on", "a", "table") def test_dad_put_a_cookie_in_a_box(): dad = situation_object(DAD) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Dad", "puts", "a", "cookie", "in", "a", "box") def test_dad_put_a_cookie_in_a_box_using_i(): dad = situation_object(DAD, properties=[IS_SPEAKER]) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("I", "put", "a", "cookie", "in", "a", "box") def test_dad_put_a_cookie_in_a_box_using_you(): dad = situation_object(DAD, properties=[IS_ADDRESSEE]) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, 
FixedIndexChooser(0)) ).as_token_sequence() == ("you", "put", "a", "cookie", "in", "a", "box") def test_dad_put_a_cookie_in_a_box_using_my_as_dad_speaker(): dad = situation_object(DAD, properties=[IS_SPEAKER]) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], always_relations=[Relation(HAS, dad, box)], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("I", "put", "a", "cookie", "in", "my", "box") def test_dad_put_a_cookie_in_a_box_using_possession(): dad = situation_object(DAD) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], always_relations=[Relation(HAS, dad, box)], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Dad", "puts", "a", "cookie", "in", "a", "box") def test_dad_put_a_cookie_in_a_box_using_you_your(): dad = situation_object(DAD, properties=[IS_ADDRESSEE]) cookie = situation_object(COOKIE) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], always_relations=[Relation(HAS, dad, box)], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("you", "put", "a", "cookie", "in", "your", "box") def test_dad_put_a_cookie_in_a_box_using_my_as_mom_speaker(): dad = situation_object(DAD) cookie = 
situation_object(COOKIE) mom = situation_object(MOM, properties=[IS_SPEAKER]) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie, box], always_relations=[Relation(HAS, mom, box)], actions=[ Action( PUT, ( (AGENT, dad), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Dad", "puts", "a", "cookie", "in", "my", "box") def test_i_put_a_cookie_in_dads_box_using_my_as_mom_speaker(): dad = situation_object(DAD) cookie = situation_object(COOKIE) mom = situation_object(MOM, properties=[IS_SPEAKER]) box = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, cookie, box, dad], always_relations=[Relation(HAS, dad, box)], actions=[ Action( PUT, ( (AGENT, mom), (THEME, cookie), (GOAL, Region(reference_object=box, distance=INTERIOR)), ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("I", "put", "a", "cookie", "in", "Dad", "'s", "box") def test_i_have_my_ball(): baby = situation_object(BABY, properties=[IS_SPEAKER]) ball = situation_object(BALL) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[baby, ball], always_relations=[Relation(HAS, baby, ball)], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("I", "have", "my", "ball") def test_dad_has_a_cookie(): dad = situation_object(DAD) cookie = situation_object(COOKIE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad, cookie], always_relations=[Relation(HAS, dad, cookie)], actions=[], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Dad", "has", "a", "cookie") def 
test_green_ball(): ball = situation_object(BALL, properties=[GREEN]) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[ball] ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("a", "green", "ball") def test_path_modifier(): situation = make_bird_flies_over_a_house() assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("a", "bird", "flies", "over", "a", "house") def test_path_modifier_under(): bird = situation_object(BIRD) table = situation_object(TABLE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[bird, table], actions=[ Action( FLY, argument_roles_to_fillers=[(AGENT, bird)], during=DuringAction( at_some_point=[ Relation( IN_REGION, bird, Region( reference_object=table, distance=DISTAL, direction=GRAVITATIONAL_DOWN, ), ) ] ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("a", "bird", "flies", "under", "a", "table") def test_path_modifier_on(): mom = situation_object(MOM) ball = situation_object(BALL) table = situation_object(TABLE) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, ball, table], actions=[ Action( ROLL, argument_roles_to_fillers=[(AGENT, mom), (THEME, ball)], during=DuringAction( at_some_point=[ Relation( IN_REGION, ball, Region( reference_object=table, distance=EXTERIOR_BUT_IN_CONTACT, direction=GRAVITATIONAL_UP, ), ) ] ), ) ], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("Mom", "rolls", "a", "ball", "on", "a", "table") def test_roll(): agent = situation_object(BABY) theme = situation_object(COOKIE) surface = situation_object(BOX) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[agent, theme, surface], actions=[ Action( ROLL, 
argument_roles_to_fillers=[(AGENT, agent), (THEME, theme)], auxiliary_variable_bindings=[(ROLL_SURFACE_AUXILIARY, surface)], ) ], always_relations=[on(theme, surface)], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("a", "baby", "rolls", "a", "cookie", "on", "a", "box") def test_noun_with_modifier(): table = situation_object(TABLE) ground = situation_object(GROUND) situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[table, ground], always_relations=[on(table, ground)], ) assert only( _SIMPLE_GENERATOR.generate_language(situation, FixedIndexChooser(0)) ).as_token_sequence() == ("a", "table", "on", "the", "ground") def test_fall_down_syntax_hint(): ball = situation_object(BALL) situation_without_modifier = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[ball], actions=[Action(FALL, argument_roles_to_fillers=[(THEME, ball)])], ) situation_with_modifier = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[ball], actions=[Action(FALL, argument_roles_to_fillers=[(THEME, ball)])], syntax_hints=[USE_ADVERBIAL_PATH_MODIFIER], ) assert generated_tokens(situation_without_modifier) == ("a", "ball", "falls") assert generated_tokens(situation_with_modifier) == ("a", "ball", "falls", "down") def test_action_with_advmod_and_preposition(): mom = situation_object(MOM) chair = situation_object(CHAIR) situation_with_advmod_and_preposition = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, chair], actions=[ Action( SIT, argument_roles_to_fillers=[ (AGENT, mom), ( GOAL, Region( chair, direction=GRAVITATIONAL_UP, distance=EXTERIOR_BUT_IN_CONTACT, ), ), ], ) ], syntax_hints=[USE_ADVERBIAL_PATH_MODIFIER], ) assert generated_tokens(situation_with_advmod_and_preposition) == ( "Mom", "sits", "down", "on", "a", "chair", ) def test_transfer_of_possession(): mom = situation_object(MOM) baby = 
situation_object(BABY) cookie = situation_object(COOKIE) for (action, verb) in ((GIVE, "gives"), (THROW, "throws")): for prefer_ditransitive in (True, False): syntax_hints = [PREFER_DITRANSITIVE] if prefer_ditransitive else [] situation = HighLevelSemanticsSituation( ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[mom, baby, cookie], actions=[ Action( action_type=action, argument_roles_to_fillers=[ (AGENT, mom), (GOAL, baby), (THEME, cookie), ], ) ], syntax_hints=syntax_hints, ) reference_tokens: Tuple[str, ...] if prefer_ditransitive: reference_tokens = ("Mom", verb, "a", "baby", "a", "cookie") else: reference_tokens = ("Mom", verb, "a", "cookie", "to", "a", "baby") assert generated_tokens(situation) == reference_tokens def test_take_to_car(): baby = situation_object(BABY) ball = situation_object(BALL) car
def _lincomb(slices, weights, dim, ref=None):
    """Perform the linear combination of a sequence of chunked tensors.

    Parameters
    ----------
    slices : sequence[sequence[tensor]]
        First level contains elements in the combinations.
        Second level contains chunks.
    weights : sequence[number]
        Linear weights
    dim : int or sequence[int]
        Dimension along which the tensor was chunked.
    ref : tensor, optional
        Tensor whose data we do not want to overwrite. If provided,
        and some slices point to the same underlying data as `ref`,
        the slice is not written into inplace.

    Returns
    -------
    lincomb : tensor
    """
    slices = make_list(slices)
    dims = make_list(dim, len(slices))

    result = None
    for chunks, weight, dim in zip(slices, weights, dims):
        # multiply chunk with weight
        chunks = make_list(chunks)
        if chunks:
            weight = torch.as_tensor(weight, dtype=chunks[0].dtype,
                                     device=chunks[0].device)
        new_chunks = []
        for chunk in chunks:
            if chunk.numel() == 0:
                # empty boundary chunk: contributes nothing
                continue
            if not weight.requires_grad:
                # save computations when possible
                if weight == 0:
                    new_chunks.append(chunk.new_zeros([]).expand(chunk.shape))
                    continue
                if weight == 1:
                    new_chunks.append(chunk)
                    continue
                if weight == -1:
                    # Negate in place only when the chunk provably does not
                    # alias `ref` and is not a broadcast (stride-0) view.
                    if ref is not None and not same_storage(ref, chunk):
                        if any(s == 0 for s in chunk.stride()):
                            chunk = -chunk
                        else:
                            chunk = chunk.neg_()
                    else:
                        chunk = -chunk
                    new_chunks.append(chunk)
                    continue
            # General weight: same aliasing rules as the -1 case above.
            if ref is not None and not same_storage(ref, chunk):
                if any(s == 0 for s in chunk.stride()):
                    chunk = chunk * weight
                else:
                    chunk *= weight
            else:
                chunk = chunk * weight
            new_chunks.append(chunk)

        # accumulate: first element initializes `result`, later elements are
        # added chunk-by-chunk into views of it.
        # NOTE(review): assumes each element's chunks tile `dim` to the same
        # total length -- guaranteed by _window1d's output.
        if result is None:
            if len(new_chunks) == 1:
                result = new_chunks[0]
            else:
                result = torch.cat(new_chunks, dim)
        else:
            offset = 0
            for chunk in new_chunks:
                index = slice(offset, offset + chunk.shape[dim])
                view = slice_tensor(result, index, dim)
                view += chunk
                offset += chunk.shape[dim]

    return result


def _window1d(x, dim, offsets, bound='dct2', value=0):
    """Extract a sliding window from a tensor.

    Views are used to minimize allocations.

    Parameters
    ----------
    x : tensor_like
        Input tensor
    dim : int
        Dimension along which to extract offsets
    offsets : [sequence of] int
        Offsets to extract, with respect to each voxel.
        To extract a centered window of length 3, use `offsets=[-1, 0, 1]`.
    bound : bound_like, default='dct2'
        Boundary conditions
    value : number, default=0
        Filling value if `bound='constant'`

    Returns
    -------
    win : [tuple of] tuple[tensor]
        If a sequence of offsets was provided, the first level
        corresponds to offsets.
        The second levels are tensors that could be concatenated along
        `dim` to generate the input tensor shifted by `offset`. However,
        to avoid unnecessary allocations, a list of (eventually empty)
        chunks is returned instead of the full shifted tensor.
        Some (hopefully most) of these tensors can be views into the
        input tensor.
    """
    return_list = isinstance(offsets, (list, tuple))
    offsets = make_list(offsets)
    return_list = return_list or len(offsets) > 1

    x = torch.as_tensor(x)
    backend = dict(dtype=x.dtype, device=x.device)
    length = x.shape[dim]

    # sanity check
    for i in offsets:
        nb_pre = max(0, -i)
        nb_post = max(0, i)
        if nb_pre > x.shape[dim] or nb_post > x.shape[dim]:
            raise ValueError('Offset cannot be farther than one length away.')

    slices = []
    for i in offsets:
        nb_pre = max(0, -i)    # elements needed before the first voxel
        nb_post = max(0, i)    # elements needed after the last voxel
        central = slice_tensor(x, slice(nb_post or None, -nb_pre or None), dim)
        if bound == 'dct2':
            # mirror about the edge voxel (half-sample symmetry)
            pre = slice_tensor(x, slice(None, nb_pre), dim)
            pre = torch.flip(pre, [dim])
            post = slice_tensor(x, slice(length-nb_post, None), dim)
            post = torch.flip(post, [dim])
            slices.append(tuple([pre, central, post]))
        elif bound == 'dct1':
            # mirror about a point half a voxel outside (whole-sample symmetry)
            pre = slice_tensor(x, slice(1, nb_pre+1), dim)
            pre = torch.flip(pre, [dim])
            post = slice_tensor(x, slice(length-nb_post-1, -1), dim)
            post = torch.flip(post, [dim])
            slices.append(tuple([pre, central, post]))
        elif bound == 'dst2':
            # antisymmetric mirror about the edge voxel
            pre = slice_tensor(x, slice(None, nb_pre), dim)
            pre = -torch.flip(pre, [dim])
            # FIX: was `slice(-nb_post, None)`, which selects the WHOLE axis
            # when nb_post == 0 (since -0 == 0) instead of an empty chunk.
            # `slice(length-nb_post, None)` is identical for nb_post > 0 and
            # correctly empty for nb_post == 0 (matches the dct2 branch).
            post = slice_tensor(x, slice(length-nb_post, None), dim)
            post = -torch.flip(post, [dim])
            slices.append(tuple([pre, central, post]))
        elif bound == 'dst1':
            # antisymmetric about a zero sample just outside each edge:
            # the implicit boundary samples are 0, then -x mirrored.
            # FIX: the zero planes and the mirrored pre/post chunks must only
            # be emitted when the window actually extends past that edge;
            # the previous code emitted both zero planes unconditionally and
            # computed `x[:nb_pre-1]` (= almost the whole axis) for
            # nb_pre == 0, so the chunks no longer summed to `length`.
            shape1 = list(x.shape)
            shape1[dim] = 1
            zero = torch.zeros([], **backend).expand(shape1)
            chunks = []
            if nb_pre > 0:
                pre = slice_tensor(x, slice(None, nb_pre-1), dim)
                chunks += [-torch.flip(pre, [dim]), zero]
            chunks.append(central)
            if nb_post > 0:
                post = slice_tensor(x, slice(length-nb_post+1, None), dim)
                chunks += [zero, -torch.flip(post, [dim])]
            slices.append(tuple(chunks))
        elif bound == 'dft':
            # circular wrap
            pre = slice_tensor(x, slice(length-nb_pre, None), dim)
            post = slice_tensor(x, slice(None, nb_post), dim)
            slices.append(tuple([pre, central, post]))
        elif bound == 'replicate':
            # repeat the edge voxel (zero-stride expanded views, no copy)
            shape_pre = list(x.shape)
            shape_pre[dim] = nb_pre
            shape_post = list(x.shape)
            shape_post[dim] = nb_post
            pre = slice_tensor(x, slice(None, 1), dim).expand(shape_pre)
            post = slice_tensor(x, slice(-1, None), dim).expand(shape_post)
            slices.append(tuple([pre, central, post]))
        elif bound == 'zero':
            shape_pre = list(x.shape)
            shape_pre[dim] = nb_pre
            shape_post = list(x.shape)
            shape_post[dim] = nb_post
            pre = torch.zeros([], **backend).expand(shape_pre)
            post = torch.zeros([], **backend).expand(shape_post)
            slices.append(tuple([pre, central, post]))
        elif bound == 'constant':
            shape_pre = list(x.shape)
            shape_pre[dim] = nb_pre
            shape_post = list(x.shape)
            shape_post[dim] = nb_post
            pre = torch.full([], value, **backend).expand(shape_pre)
            post = torch.full([], value, **backend).expand(shape_post)
            slices.append(tuple([pre, central, post]))
    slices = tuple(slices)
    if not return_list:
        slices = slices[0]
    return slices
""" if vx is None: vx = (1,) * 3 if not isinstance(vx, torch.Tensor): vx = torch.tensor(vx, dtype=dat.dtype, device=dat.device) half = torch.tensor(0.5, dtype=dat.dtype, device=dat.device) ndim = len(dat.shape) - 1 bound = _bound_converter[bound] if which == 'forward': # Pad + reflected forward difference if ndim == 2: # 2D data x = utils.pad(dat[0, ...], (1, 0, 0, 0), mode=bound) x = x[:-1, :] - x[1:, :] y = utils.pad(dat[1, ...], (0, 0, 1, 0), mode=bound) y = y[:, :-1] - y[:, 1:] else: # 3D data x = utils.pad(dat[0, ...], (1, 0, 0, 0, 0, 0), mode=bound) x = x[:-1, :, :] - x[1:, :, :] y = utils.pad(dat[1, ...], (0, 0, 1, 0, 0, 0), mode=bound) y = y[:, :-1, :] - y[:, 1:, :] z = utils.pad(dat[2, ...], (0, 0, 0, 0, 1, 0), mode=bound) z = z[:, :, :-1] - z[:, :, 1:] elif which == 'backward': # Pad + reflected backward difference if ndim == 2: # 2D data x = utils.pad(dat[0, ...], (0, 1, 0, 0), mode=bound) x = x[:-1, :] - x[1:, :] y = utils.pad(dat[1, ...], (0, 0, 0, 1), mode=bound) y = y[:, :-1] - y[:, 1:] else: # 3D data x = utils.pad(dat[0, ...], (0, 1, 0, 0, 0, 0), mode=bound) x = x[:-1, :, :] - x[1:, :, :] y = utils.pad(dat[1, ...], (0, 0, 0, 1, 0, 0), mode=bound) y = y[:, :-1, :] - y[:, 1:, :] z = utils.pad(dat[2, ...], (0, 0, 0, 0, 0, 1), mode=bound) z = z[:, :, :-1] - z[:, :, 1:] elif which == 'central': # Pad + reflected central difference if ndim == 2: # 2D data x = utils.pad(dat[0, ...], (1, 1, 0, 0), mode=bound) x = half * (x[:-2, :] - x[2:, :]) y = utils.pad(dat[1, ...], (0, 0, 1, 1), mode=bound) y = half * (y[:, :-2] - y[:, 2:]) else: # 3D data x = utils.pad(dat[0, ...], (1, 1, 0, 0, 0, 0), mode=bound) x = half * (x[:-2, :, :] - x[2:, :, :]) y = utils.pad(dat[1, ...], (0, 0, 1, 1, 0, 0), mode=bound) y = half * (y[:, :-2, :] - y[:, 2:, :]) z = utils.pad(dat[2, ...], (0, 0, 0, 0, 1, 1), mode=bound) z = half * (z[:, :, :-2] - z[:, :,
    # ------------------------------------------------------------------
    # NOTE(review): auto-generated Citrix NITRO SDK option accessors.
    # Each property simply reads/writes a private `_name` attribute; the
    # `try/except Exception as e: raise e` wrappers are no-ops kept for
    # consistency with the rest of the generated file.
    # ------------------------------------------------------------------
    @property
    def resrule(self) :
        """Default syntax expression specifying which part of a server's response to use for creating rule based persistence sessions (persistence type RULE). Can be either an expression or the name of a named expression. Example: HTTP.RES.HEADER("setcookie").VALUE(0).TYPECAST_NVLIST_T('=',';').VALUE("server1").<br/>Default value: "none".
        """
        try :
            return self._resrule
        except Exception as e:
            raise e

    @resrule.setter
    def resrule(self, resrule) :
        """Default syntax expression specifying which part of a server's response to use for creating rule based persistence sessions (persistence type RULE). Can be either an expression or the name of a named expression. Example: HTTP.RES.HEADER("setcookie").VALUE(0).TYPECAST_NVLIST_T('=',';').VALUE("server1").<br/>Default value: "none"
        """
        try :
            self._resrule = resrule
        except Exception as e:
            raise e

    @property
    def persistmask(self) :
        """Persistence mask for IP based persistence types, for IPv4 virtual servers.<br/>Minimum length = 1.
        """
        try :
            return self._persistmask
        except Exception as e:
            raise e

    @persistmask.setter
    def persistmask(self, persistmask) :
        """Persistence mask for IP based persistence types, for IPv4 virtual servers.<br/>Minimum length = 1
        """
        try :
            self._persistmask = persistmask
        except Exception as e:
            raise e

    @property
    def v6persistmasklen(self) :
        """Persistence mask for IP based persistence types, for IPv6 virtual servers.<br/>Default value: 128<br/>Minimum length = 1<br/>Maximum length = 128.
        """
        try :
            return self._v6persistmasklen
        except Exception as e:
            raise e

    @v6persistmasklen.setter
    def v6persistmasklen(self, v6persistmasklen) :
        """Persistence mask for IP based persistence types, for IPv6 virtual servers.<br/>Default value: 128<br/>Minimum length = 1<br/>Maximum length = 128
        """
        try :
            self._v6persistmasklen = v6persistmasklen
        except Exception as e:
            raise e

    @property
    def pq(self) :
        """Use priority queuing on the virtual server. based persistence types, for IPv6 virtual servers.<br/>Default value: OFF<br/>Possible values = ON, OFF.
        """
        try :
            return self._pq
        except Exception as e:
            raise e

    @pq.setter
    def pq(self, pq) :
        """Use priority queuing on the virtual server. based persistence types, for IPv6 virtual servers.<br/>Default value: OFF<br/>Possible values = ON, OFF
        """
        try :
            self._pq = pq
        except Exception as e:
            raise e

    @property
    def sc(self) :
        """Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
        """
        try :
            return self._sc
        except Exception as e:
            raise e

    @sc.setter
    def sc(self, sc) :
        """Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF
        """
        try :
            self._sc = sc
        except Exception as e:
            raise e

    @property
    def rtspnat(self) :
        """Use network address translation (NAT) for RTSP data connections.<br/>Default value: OFF<br/>Possible values = ON, OFF.
        """
        try :
            return self._rtspnat
        except Exception as e:
            raise e

    @rtspnat.setter
    def rtspnat(self, rtspnat) :
        """Use network address translation (NAT) for RTSP data connections.<br/>Default value: OFF<br/>Possible values = ON, OFF
        """
        try :
            self._rtspnat = rtspnat
        except Exception as e:
            raise e

    @property
    def m(self) :
        """Redirection mode for load balancing. Available settings function as follows:
        * IP - Before forwarding a request to a server, change the destination IP address to the server's IP address.
        * MAC - Before forwarding a request to a server, change the destination MAC address to the server's MAC address.
        The destination IP address is not changed. MAC-based redirection mode is used mostly in firewall load balancing deployments.
        * IPTUNNEL - Perform IP-in-IP encapsulation for client IP packets. In the outer IP headers, set the destination IP address to the IP address of the server and the source IP address to the subnet IP (SNIP). The client IP packets are not modified. Applicable to both IPv4 and IPv6 packets.
        * TOS - Encode the virtual server's TOS ID in the TOS field of the IP header.
        You can use either the IPTUNNEL or the TOS option to implement Direct Server Return (DSR).<br/>Default value: IP<br/>Possible values = IP, MAC, IPTUNNEL, TOS.
        """
        try :
            return self._m
        except Exception as e:
            raise e

    @m.setter
    def m(self, m) :
        """Redirection mode for load balancing. Available settings function as follows:
        * IP - Before forwarding a request to a server, change the destination IP address to the server's IP address.
        * MAC - Before forwarding a request to a server, change the destination MAC address to the server's MAC address.
        The destination IP address is not changed. MAC-based redirection mode is used mostly in firewall load balancing deployments.
        * IPTUNNEL - Perform IP-in-IP encapsulation for client IP packets. In the outer IP headers, set the destination IP address to the IP address of the server and the source IP address to the subnet IP (SNIP). The client IP packets are not modified. Applicable to both IPv4 and IPv6 packets.
        * TOS - Encode the virtual server's TOS ID in the TOS field of the IP header.
        You can use either the IPTUNNEL or the TOS option to implement Direct Server Return (DSR).<br/>Default value: IP<br/>Possible values = IP, MAC, IPTUNNEL, TOS
        """
        try :
            self._m = m
        except Exception as e:
            raise e

    @property
    def tosid(self) :
        """TOS ID of the virtual server. Applicable only when the load balancing redirection mode is set to TOS.<br/>Minimum length = 1<br/>Maximum length = 63.
        """
        try :
            return self._tosid
        except Exception as e:
            raise e

    @tosid.setter
    def tosid(self, tosid) :
        """TOS ID of the virtual server. Applicable only when the load balancing redirection mode is set to TOS.<br/>Minimum length = 1<br/>Maximum length = 63
        """
        try :
            self._tosid = tosid
        except Exception as e:
            raise e

    @property
    def datalength(self) :
        """Length of the token to be extracted from the data segment of an incoming packet, for use in the token method of load balancing. The length of the token, specified in bytes, must not be greater than 24 KB. Applicable to virtual servers of type TCP.<br/>Minimum length = 1<br/>Maximum length = 100.
        """
        try :
            return self._datalength
        except Exception as e:
            raise e

    @datalength.setter
    def datalength(self, datalength) :
        """Length of the token to be extracted from the data segment of an incoming packet, for use in the token method of load balancing. The length of the token, specified in bytes, must not be greater than 24 KB. Applicable to virtual servers of type TCP.<br/>Minimum length = 1<br/>Maximum length = 100
        """
        try :
            self._datalength = datalength
        except Exception as e:
            raise e

    @property
    def dataoffset(self) :
        """Offset to be considered when extracting a token from the TCP payload. Applicable to virtual servers, of type TCP, using the token method of load balancing. Must be within the first 24 KB of the TCP payload.<br/>Maximum length = 25400.
        """
        try :
            return self._dataoffset
        except Exception as e:
            raise e

    @dataoffset.setter
    def dataoffset(self, dataoffset) :
        """Offset to be considered when extracting a token from the TCP payload. Applicable to virtual servers, of type TCP, using the token method of load balancing. Must be within the first 24 KB of the TCP payload.<br/>Maximum length = 25400
        """
        try :
            self._dataoffset = dataoffset
        except Exception as e:
            raise e

    @property
    def sessionless(self) :
        """Perform load balancing on a per-packet basis, without establishing sessions.
        Recommended for load balancing of intrusion detection system (IDS) servers and scenarios involving direct server return (DSR), where session information is unnecessary.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
        """
        try :
            return self._sessionless
        except Exception as e:
            raise e

    @sessionless.setter
    def sessionless(self, sessionless) :
        """Perform load balancing on a per-packet basis, without establishing sessions.
        Recommended for load balancing of intrusion detection system (IDS) servers and scenarios involving direct server return (DSR), where session information is unnecessary.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
        """
        try :
            self._sessionless = sessionless
        except Exception as e:
            raise e

    @property
    def state(self) :
        """State of the load balancing virtual server.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
        """
        try :
            return self._state
        except Exception as e:
            raise e

    @state.setter
    def state(self, state) :
        """State of the load balancing virtual server.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
        """
        try :
            self._state = state
        except Exception as e:
            raise e

    @property
    def connfailover(self) :
        """Mode in which the connection failover feature must operate for the virtual server. After a failover, established TCP connections and UDP packet flows are kept active and resumed on the secondary appliance. Clients remain connected to the same servers. Available settings function as follows:
        * STATEFUL - The primary appliance shares state information with the secondary appliance, in real time, resulting in some runtime processing overhead.
        * STATELESS - State information is not shared, and the new primary appliance tries to re-create the packet flow on the basis of the information contained in the packets it receives.
        * DISABLED - Connection failover does not occur.<br/>Default value: DISABLED<br/>Possible values = DISABLED, STATEFUL, STATELESS.
        """
        try :
            return self._connfailover
        except Exception as e:
            raise e
def platform():
    """Gets the target platform as set by the user via the invoke CLI, stored in invoke.yml"""
    if os.path.isfile(test.INVOKE_YML_FILE):
        with open(test.INVOKE_YML_FILE) as config:
            config_dict = yaml.safe_load(config)
            # NOTE(review): assumes 'platform' is always present in invoke.yml;
            # a missing key would raise AttributeError on .lower() -- confirm.
            plt = config_dict.get('platform').lower()
            if plt not in test.AVAILABLE_PLATFORMS:
                test.logger.error(f"Platform must be set to one of: {', '.join(test.AVAILABLE_PLATFORMS)} "
                                  f"in '{test.INVOKE_YML_FILE}'")
                sys.exit(0)
            else:
                return plt
    else:
        test.logger.error(f"'{test.INVOKE_YML_FILE}' not found. Please run 'inv setup'")
        sys.exit(0)


def setup_db_creds(plt):
    """Load and validate the credential variables required for `plt`.

    :param plt: target platform key ('snowflake', 'bigquery' or 'sqlserver')
    :return: dict of {env var name: value}; exits if any value is missing
    """
    required_keys = {
        "snowflake": [
            "SNOWFLAKE_DB_ACCOUNT",
            "SNOWFLAKE_DB_USER",
            "SNOWFLAKE_DB_PW",
            "SNOWFLAKE_DB_ROLE",
            "SNOWFLAKE_DB_DATABASE",
            "SNOWFLAKE_DB_WH",
            "SNOWFLAKE_DB_SCHEMA"],
        "bigquery": [
            "GCP_PROJECT_ID",
            "GCP_DATASET"],
        "sqlserver": [
            "SQLSERVER_DB_SERVER",
            "SQLSERVER_DB_PORT",
            "SQLSERVER_DB_DATABASE",
            "SQLSERVER_DB_SCHEMA",
            "SQLSERVER_DB_USER",
            "SQLSERVER_DB_PW"
        ]
    }

    env = Env()

    # Prefer the env file when present; otherwise fall back to the process
    # environment (Env() reads os.environ by default).
    if os.path.isfile(test.OP_DB_FILE):
        env.read_env(test.OP_DB_FILE)

    details = {key: env(key) for key in required_keys[plt]}

    if not all([v for v in details.values()]):
        test.logger.error(f"{str(plt).title()} environment details incomplete or not found. "
                          f"Please check your 'env/db.env' file "
                          f"or ensure the required variables are added to your environment: "
                          f"{', '.join(required_keys[plt])}")
        sys.exit(0)
    else:
        return details


def setup_environment():
    """Resolve the target platform, validate credentials and export the
    DBT_PROFILES_DIR / PLATFORM variables used by the rest of the harness."""
    p = platform()
    setup_db_creds(plt=p)

    if not os.getenv('DBT_PROFILES_DIR') and os.path.isfile(test.PROFILE_DIR / 'profiles.yml'):
        os.environ['DBT_PROFILES_DIR'] = str(test.PROFILE_DIR)

    os.environ['PLATFORM'] = p


def inject_parameters(file_contents: str, parameters: dict) -> str:
    """
    Replace placeholders in a file with the provided dictionary

        :param file_contents: String containing expected file contents
        :param parameters: Dictionary of parameters {placeholder: value}
        :return: Parsed/injected file
    """
    if not parameters:
        return file_contents

    for key, val in parameters.items():
        # FIX: re.escape() treats placeholder names literally even when they
        # contain regex metacharacters, and the callable replacement keeps
        # backslashes in `val` literal instead of re.sub re-interpreting them
        # as group references/escapes (e.g. Windows paths raised "bad escape").
        file_contents = re.sub(rf'\[{re.escape(key)}]',
                               lambda _match, _v=val: _v,
                               file_contents, flags=re.IGNORECASE)

    remaining_placeholders = re.findall(
        "|".join([rf'\[{re.escape(key)}]' for key in parameters.keys()]), file_contents)

    if remaining_placeholders:
        raise ValueError(f"Unable to replace some placeholder values: {', '.join(remaining_placeholders)}")

    return file_contents


def clean_target():
    """
    Deletes content in target folder (compiled SQL)
    Faster than running dbt clean.
    """
    shutil.rmtree(test.TEST_PROJECT_ROOT / 'target', ignore_errors=True)


def clean_csv(model_name=None):
    """
    Deletes csv files in csv folder.

    :param model_name: if given, delete only that model's seed file
    """
    if model_name:
        delete_files = [test.CSV_DIR / f"{model_name.lower()}.csv"]
    else:
        delete_files = [file for file in glob.glob(str(test.CSV_DIR / '*.csv'), recursive=True)]

    for file in delete_files:
        if os.path.isfile(file):
            os.remove(file)


def clean_models(model_name=None):
    """
    Deletes models in features folder.

    :param model_name: if given, delete only that model's SQL file
    """
    if model_name:
        delete_files = [test.TEST_MODELS_ROOT / f"{model_name.lower()}.sql"]
    else:
        delete_files = [file for file in glob.glob(str(test.TEST_MODELS_ROOT / '*.sql'), recursive=True)]

    for file in delete_files:
        if os.path.isfile(file):
            os.remove(file)
""" if model_name: delete_files = [test.TEST_MODELS_ROOT / f"{model_name.lower()}.sql"] else: delete_files = [file for file in glob.glob(str(test.TEST_MODELS_ROOT / '*.sql'), recursive=True)] for file in delete_files: if os.path.isfile(file): os.remove(file) def create_dummy_model(): """ Create dummy model to avoid unused config warning """ with open(test.TEST_MODELS_ROOT / 'dummy.sql', 'w') as f: f.write('SELECT 1') def is_full_refresh(context): return getattr(context, 'full_refresh', False) def is_successful_run(dbt_logs: str): return 'Done' in dbt_logs and 'SQL compilation error' not in dbt_logs def is_pipeline(): return os.getenv('PIPELINE_JOB') and os.getenv('PIPELINE_BRANCH') def parse_hashdiffs(columns_as_series: Series) -> Series: """ Evaluate strings surrounded with hashdiff() and exclude_hashdiff() to augment the YAML metadata and configure hashdiff columns for staging. :param columns_as_series: Columns from a context.table in Series form. :return: Modified series """ standard_pattern = r"^(?:hashdiff\(')(.*)(?:'\))" exclude_pattern = r"^(?:exclude_hashdiff\(')(.*)(?:'\))" columns = [] for item in columns_as_series: if re.search(standard_pattern, item): raw_item = re.findall(standard_pattern, item)[0] split_item = str(raw_item).split(",") hashdiff_dict = {"is_hashdiff": True, "columns": split_item} columns.append(hashdiff_dict) elif re.search(exclude_pattern, item): raw_item = re.findall(exclude_pattern, item)[0] split_item = str(raw_item).split(",") hashdiff_dict = {"is_hashdiff": True, "exclude_columns": True, "columns": split_item} columns.append(hashdiff_dict) else: columns.append(item) return Series(columns) def parse_lists_in_dicts(dicts_with_lists: List[dict]) -> list: """ Convert string representations of lists in dict values, in a list of dicts :param dicts_with_lists: A list of dictionaries """ if isinstance(dicts_with_lists, list): processed_dicts = [] check_dicts = [k for k in dicts_with_lists if isinstance(k, dict)] if not check_dicts: return 
def calc_hash(columns_as_series: Series) -> Series:
    """
    Calculates the MD5 hash for a given value

    :param columns_as_series: A pandas Series of strings for the hash to be calculated on.
    In the form of "md5('1000')" or "sha('1000')"
    :return: Hash (MD5 or SHA) of values as Series (used as column)
    """
    patterns = {
        'md5': {
            'pattern': r"^(?:md5\(')(.*)(?:'\))",
            'function': md5},
        'sha': {
            'pattern': r"^(?:sha\(')(.*)(?:'\))",
            'function': sha256}}

    hashed_list = []

    for item in columns_as_series:
        # Match the full wrapper pattern instead of a plain substring test
        # ("sha" in item): a value merely containing 'sha'/'md5' (e.g.
        # "sharp") previously slipped past the substring check and crashed
        # on re.findall(...)[0] returning an empty list.
        for config in patterns.values():
            match = re.search(config['pattern'], item)
            if match:
                hash_func = config['function']
                hashed_item = str(hash_func(match.group(1).encode('utf-8')).hexdigest()).upper()
                hashed_list.append(hashed_item)
                break
        else:
            # Not a hash wrapper: keep the value as-is
            hashed_list.append(item)

    return Series(hashed_list)
def run_dbt_models(*, mode='compile', model_names: list, args=None, full_refresh=False) -> str:
    """
    Run or Compile a specific dbt model, with optionally provided variables.

    :param mode: dbt command to run, 'run' or 'compile'. Defaults to compile
    :param model_names: List of model names to run
    :param args: variable dictionary to provide to dbt
    :param full_refresh: Run a full refresh
    :return Log output of dbt run operation
    """
    command = ['dbt', mode, '-m', " ".join(model_names)]

    if full_refresh:
        command = command + ['--full-refresh']

    if args:
        # Serialise the vars dictionary so dbt receives valid JSON
        command = command + [f"--vars '{json.dumps(args)}'"]

    return run_dbt_command(command)
def context_table_to_df(table: Table, use_nan=True) -> pd.DataFrame:
    """
    Converts a context table in a feature file into a pandas DataFrame

    :param table: The context.table from a scenario
    :param use_nan: Replace <null> placeholder with NaN
    :return: DataFrame representation of the provide context table
    """
    # Evaluate md5('...')/sha('...') wrappers first, then the
    # hashdiff()/exclude_hashdiff() wrappers, column by column.
    frame = (pd.DataFrame(columns=table.headings, data=table.rows)
             .apply(calc_hash)
             .apply(parse_hashdiffs))

    return frame.replace("<null>", NaN) if use_nan else frame
def ecdsa_sign(
        ctx,
        msg32,
        seckey,
        noncefp=ffi.NULL,
        ndata=ffi.NULL):
    '''Create an ECDSA signature.

    The created signature is always in lower-S form. See
    secp256k1_ecdsa_signature_normalize for more details.

    Args:
        ctx (secp256k1_context*): a secp256k1 context object, initialized
            for signing
        msg32 (bytes): the 32-byte message hash being signed (cannot be NULL)
        seckey (bytes): pointer to a 32-byte secret key (cannot be NULL)
        noncefp (secp256k1_nonce_function): pointer to a nonce generation
            function. If NULL, secp256k1_nonce_function_default is used
        ndata (void*): pointer to arbitrary data used by the nonce
            generation function (can be NULL)

    Returns:
        (int, secp256k1_ecdsa_signature*):
            (1: signature created, 0: the nonce generation function failed,
            or the private key was invalid,
            pointer to an array where the signature will be placed
            (cannot be NULL))
    '''
    # Validate every argument before touching the C library; each helper
    # raises on bad input. Validation order is preserved so the same error
    # surfaces first as before.
    utils.validate_context(ctx)
    utils.validate_msg32_ser(msg32)
    utils.validate_secret_key_ser(seckey)
    utils.validate_noncefp(noncefp)
    utils.validate_ndata(ndata)

    # Output struct filled in by libsecp256k1
    signature = ffi.new('secp256k1_ecdsa_signature *')

    status = lib.secp256k1_ecdsa_sign(ctx, signature, msg32, seckey, noncefp, ndata)

    return (status, signature)
def ec_privkey_tweak_add(ctx, seckey, tweak):
    '''Tweak a private key by adding tweak to it.

    Args:
        ctx (secp256k1_context*): pointer to a context object (cannot be NULL)
        seckey (bytes): a 32-byte private key
        tweak (bytes): a 32-byte tweak

    Returns:
        (int, bytes): 0 if the tweak was out of range (chance of around
            1 in 2^128 for uniformly random 32-byte arrays), or if the
            resulting private key would be invalid (only when the tweak is
            the complement of the corresponding private key); 1 otherwise.
            Second element is the (tweaked) 32-byte private key.
    '''
    # Validate all inputs up front, in the same order as the other wrappers.
    for validator, value in (
            (utils.validate_context, ctx),
            (utils.validate_secret_key_ser, seckey),
            (utils.validate_tweak_ser, tweak)):
        validator(value)

    return (lib.secp256k1_ec_privkey_tweak_add(ctx, seckey, tweak), seckey)
def ec_privkey_tweak_mul(ctx, seckey, tweak):
    '''Tweak a private key by multiplying it by a tweak.

    Args:
        ctx (secp256k1_context*): pointer to a context object (cannot be NULL)
        seckey (bytes): pointer to a 32-byte private key
        tweak (bytes): pointer to a 32-byte tweak

    Returns:
        (int, seckey): 0 if the tweak was out of range (chance of around
            1 in 2^128 for uniformly random 32-byte arrays) or equal to
            zero; 1 otherwise. Second element is a pointer to the 32-byte
            private key.
    '''
    # Validate all inputs up front, in the same order as the other wrappers.
    for validator, value in (
            (utils.validate_context, ctx),
            (utils.validate_secret_key_ser, seckey),
            (utils.validate_tweak_ser, tweak)):
        validator(value)

    return (lib.secp256k1_ec_privkey_tweak_mul(ctx, seckey, tweak), seckey)
def context_randomize(ctx, seed32):
    '''Updates the context randomization to protect against side-channel
    leakage.

    The seed is combined into an internal blinding value that is added
    before each multiplication (and removed afterwards), so results are
    unaffected while attacks relying on input-dependent behaviour are
    shielded. Call this after secp256k1_context_create or
    secp256k1_context_clone; it may be called repeatedly afterwards.

    Args:
        ctx (secp256k1_context*): pointer to a context object (cannot be NULL)
        seed32 (bytes): pointer to a 32-byte random seed
            (NULL resets to initial state)

    Returns:
        (int): 1 if randomization successfully updated, 0 if error
    '''
    utils.validate_context(ctx)

    utils.validate_bytes_type(
        seed32,
        [32],
        'Invalid seed32. Must be 32-bytes.')

    return lib.secp256k1_context_randomize(ctx, seed32)
Args: ctx (secp256k1_context*): pointer to a context object pubkeys (list): list of pubkeys to add together Returns: (int, secp256k1_pubkey*): (1: the sum of the public keys is valid 0: the sum of the public keys is not valid, pointer to a public key object for placing the resulting public key (cannot be NULL)) ''' # Validate context utils.validate_context(ctx) # Number of public keys to add together n = len(pubkeys) # Pointer to array of pointers to public keys (cannot be null) ins = ffi.new('secp256k1_pubkey[]', n) ins = ffi.new(
root_struct = None meta = None meta_tups = None else: # shared memory path if sys.platform != 'win32': # TJD this path needs to be tested more if has_ext: return _read_sds(path, sharename=sharename, info=info, include=include, stack=stack, sections=sections, threads=threads, filter=filter) dir, schema, checkdir, root_struct, meta, meta_tups = _load_sds_mem(path, name=name, sharename=sharename, info=info, include_all_sds=include_all_sds, include=include, threads=threads) else: # NOTE: windows shared memory does not support dataset nesting via a struct currently.. # but it could with a little more work return _read_sds(path, sharename=sharename, info=info, include=include, stack=stack, threads=threads, folders=folders, sections=sections, filter=filter) # root struct still needs to be initialized - windows sharedmemory load has root struct already # linux has a normal directory listing from the file system if checkdir: if name is None: name = f'_root.sds' if name not in dir: # directories with SDS and no _root are pretty common, killing this warning #warnings.warn(f'Could not find _root.sds file. Loading files in {dir} into container struct.') root_struct = TypeRegister.Struct({}) meta = None else: # build the initial struct from root sds del schema['_root'] root_struct, meta, meta_tups = _init_root_container(path, '_root', sharename=sharename, info=info, include=include, threads=threads) file_prefix=None else: # tiers can be separated by /, but files will be named with ! 
name = name.replace('/','!') if sds_endswith(name, add=False): name = name[:-4] # use name keyword to snipe one dataset or struct if name + SDS_EXTENSION in dir: root_struct, meta, meta_tups = _init_root_container(path, name, sharename=sharename, info=info, include=include, threads=threads) file_prefix = name name = name.split('!') # climb down tiers for tier in name: schema = schema[tier] else: raise ValueError(f"Could not find .sds file for {name} in {path}") # TODO: write something to handle name keyword in shared memory else: file_prefix = None final_sort = None root_file_found = (meta is not None) # possibly load from extra files in directory include_extra = False if root_file_found: final_sort = _order_from_meta(root_struct, meta, meta_tups) # all items will be included if no root file was found if final_sort is not None: # check for extra files, see if user wants to include include_extra = _include_extra_sds_files(schema, final_sort, include_all_sds, include=include) # choose the correct recursive function (full load or just info) # the recursive function will crawl other structures, or dictionaries from tree if info: #build_func = _summary_from_schema nocrawl = str else: #build_func = _struct_from_schema nocrawl = np.ndarray #multiload = None multiload = [] # load individual files # not supported for shared memory # include keyword behaves differently than with an individual file load, so take the less common path for that too if multiload is None or sharename is not None or include is not None: # ---- main load for entire directory for k, v in schema.items(): if include is not None: if k not in include: continue try: item = root_struct[k] # none indicates that the structure was initialized, but data hasn't been loaded from file # this helps preserve item order in struct if item is None: #root_struct[k] = build_func(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=include) root_struct[k] = _sds_load_from_schema(schema, path, 
dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, threads=threads) #root_struct[k] = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=include, info=info, nocrawl=nocrawl, threads=threads) else: warnings.warn(f"Found .sds file for item {k}, but was already in struct as {root_struct[k]}. Skipping .sds load.", stacklevel=2) except: #root_struct[k] = build_func(schema, path, dir, filename=file_prefix, root=k, sharename=sharename) root_struct[k] = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, threads=threads) #root_struct[k] = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=include, info=info, nocrawl=nocrawl, threads=threads) # in this branch, flip to multi-file load else: # first pass, collect all filepaths # TODO: fold this into one pass, store return index in some kind of nested dictionary? 
for k, v in schema.items(): if include is not None: if k not in include: continue try: item = root_struct[k] if item is None: _ = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, multiload=multiload) else: pass except: _ = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, multiload=multiload) # call multiload, loads all into list # NEW: pass in list of known good files multiload = decompress_dataset_internal(multiload, sharename=sharename, info=info, include=include, stack=stack, threads=threads, filter=filter, goodfiles=goodfiles) #if isinstance(multiload, tuple): # multiload = [multiload] # second pass, build nested containers # fake python int pointer to index the order of loaded files, restore correct hierarchy multiload_idx = [0] for k, v in schema.items(): if include is not None: if k not in include: continue try: item = root_struct[k] if item is None: root_struct[k] = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, multiload=multiload, multiload_idx=multiload_idx) else: pass except: root_struct[k] = _sds_load_from_schema(schema, path, dir, filename=file_prefix, root=k, sharename=sharename, include=None, info=info, nocrawl=nocrawl, multiload=multiload, multiload_idx=multiload_idx) # if root file found for metadata, sort items in root struct # if no root file found, order will be same as directory order if root_file_found and final_sort is not None and include is None: # if any files from original list were not included in final struct, list them, remove from sort list missing = [] for item in final_sort: rm_item = False try: v = root_struct[item] except: rm_item = True else: # initialized from root info, but no .sds file found - value will be None if v is None: rm_item = True if rm_item: warn_missing = True 
if include is not None: if rm_item not in include: warn_missing = False if warn_missing: warnings.warn(f"Could not load data for item {item}, file for this item may be missing.") missing.append(item) for item in missing: final_sort.remove(item) root_struct = root_struct[final_sort] # if extra files were added to root struct, optionally rebuild _root.sds # if all extra files were included, skip the prompt, but don't rewrite the _root.sds file if include_extra and not include_all_sds: prompt = f"Include extra items in root struct for future loads? (_root.sds will be rebuilt) (y/n) " while(True): choice = input(prompt) if choice in 'Yy': _write_to_sds(root_struct, path, name='_root', compress=True, sharename=sharename) break elif choice in 'Nn': break return root_struct #------------------------------------------------------------------------------------ def _sds_load_from_schema(schema, path, dir, filename=None, root=None, sharename=None, include=None, info=False, nocrawl=np.ndarray, multiload=None, multiload_idx=None, multiload_schema=None, threads=None): r''' Recursive function for loading data or info from .sds directory. Nested structures are stored: Example: -------- >>> st = Struct({ 'a': Struct({ 'arr' : arange(10), 'a2' : Dataset({ 'col1': arange(5) }) }), 'b': Struct({ 'ds1' : Dataset({ 'ds1col': arange(6) }), 'ds2' : Dataset({ 'ds2col' : arange(7) }) }), }) >>> st.tree() Struct ├──── a (Struct) │ ├──── arr int32 (10,) 4 │ └──── a2 (Dataset) │ └──── col1 int32 (5,) 4 └──── b (Struct) ├──── ds1 (Dataset) │ └──── ds1col int32 (6,) 4 └──── ds2 (Dataset) └──── ds2col int32 (7,) 4 >>> st.save(r'D:\junk\morejunk') >>> os.listdir(r'D:\junk\morejunk') _root.sds a!a2.sds a.sds b!ds1.sds b!ds2.sds ''' multiload_schema = {} schema = schema[root] if filename is not None: filename = filename + '!' 
+ root else: # for root level items filename = root # set default container in case nested .sds file doesn't exist default_container = TypeRegister.Struct data = {} sds_file = filename+SDS_EXTENSION # check for file in directory list if sds_file in dir: fullpath = path + os.path.sep + sds_file if multiload is None: # load container or array data = _read_sds(fullpath, sharename=sharename, include=include, info=info) else: if multiload_idx is None: # add full path for final multiload call multiload.append(fullpath) # maybe add to a different schema so the second pass for final load can be reduced # will this save any time? it would reduce the amount of calls to 'in', but not much else else: # pass the preloaded data to final constructor data = _read_sds(fullpath, sharename=sharename, include=include, info=info, multiload=multiload[multiload_idx[0]]) multiload_idx[0] += 1 # only recurse/restore order for containers if not isinstance(data, nocrawl): # TJD Feb 2020 - this code is slow when many files in the directory > 10000+ # TODO improve the speed of this for k in schema.keys(): data[k] = _sds_load_from_schema(schema, path, dir, filename=filename, root=k, sharename=sharename, include=include, info=info, nocrawl=nocrawl, multiload=multiload, multiload_idx=multiload_idx, threads=threads) # nested
#!/usr/bin/env python
"""Align two Alignables, each of which can be a sequence or a subalignment
produced by the same code."""

# How many cells before using linear space alignment algorithm.
# Should probably set to about half of physical memory / PointerEncoder.bytes
HIRSCHBERG_LIMIT = 10 ** 8

import warnings

import numpy

from cogent3.align.traceback import alignment_traceback, map_traceback
from cogent3.core.alignment import Aligned
from cogent3.evolve.likelihood_tree import LikelihoodTreeEdge
from cogent3.util.misc import ascontiguousarray

from . import pairwise_pogs_numba as align_module
from . import pairwise_seqs_numba as seq_align_module
from .indel_positions import leaf2pog


def _as_combined_arrays(preds):
    # Flatten the two predecessor POGs into the packed (values, offsets)
    # representation expected by the compiled aligner modules.
    pog1, pog2 = preds
    j_sources, j_sources_offsets = pog2.as_combined_array()
    i_sources, i_sources_offsets = pog1.as_combined_array()
    return i_sources, i_sources_offsets, j_sources, j_sources_offsets


__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"


class PointerEncoding(object):
    """Pack very small ints into a byte.

    Each viterbi traceback cell stores an (x predecessor, y predecessor,
    state) triple packed into one unsigned byte.  The last field, state, is
    assigned whatever bits are left over after the x and y pointers have
    claimed what they need, which is expected to be only 2 bits each at most.
    """

    dtype = numpy.uint8  # one byte per traceback cell
    bytes = 1

    def __init__(self, x, y):
        # x, y are the maximum predecessor counts per axis; choose bit widths
        # wide enough to hold 0..x and 0..y respectively.
        assert x > 0 and y > 0, (x, y)
        (x, y) = (numpy.ceil(numpy.log2([x + 1, y + 1]))).astype(int)
        s = 8 * self.bytes - sum([x, y])  # bits left over for the state field
        # NOTE(review): the trailing comment says "min states required"; the
        # representable-state check would be 2 ** s >= 4 + 1, not s ** 2 --
        # confirm intent before changing.
        assert s ** 2 >= 4 + 1, (x, y, s)  # min states required
        self.widths = numpy.array([x, y, s]).astype(int)
        self.limits = 2 ** self.widths
        self.max_states = self.limits[-1]
        if DEBUG:
            print(self.max_states, "states allowed in viterbi traceback")
        # Bit offset of each field within the packed byte.
        self.positions = numpy.array([0, x, x + y], int)
        # a.flags.writeable = False

    def encode(self, x, y, s):
        """Pack an (x, y, state) triple into a single integer."""
        parts = numpy.asarray([x, y, s], int)
        assert all(parts < self.limits), (parts, self.limits)
        return (parts << self.positions).sum()

    def decode(self, coded):
        """Inverse of encode(): recover the (x, y, state) fields."""
        return (coded >> self.positions) % self.limits

    def get_empty_array(self, shape):
        # Zeroed traceback matrix in the packed one-byte dtype.
        return numpy.zeros(shape, self.dtype)


DEBUG = False


def py_calc_rows(
    plan,
    x_index,
    y_index,
    i_low,
    i_high,
    j_low,
    j_high,
    preds,
    state_directions,
    T,
    xgap_scores,
    ygap_scores,
    match_scores,
    rows,
    track,
    track_enc,
    viterbi,
    local=False,
    use_scaling=False,
    use_logs=False,
):
    """Pure python version of the dynamic programming algorithms Forward and
    Viterbi.  Works on sequences and POGs.  Used as a fallback when the
    compiled (numba) aligner modules are unavailable.

    Returns ((i, j), state, best_score) for the winning cell, with
    best_score reported in log space.
    """
    if use_scaling:
        warnings.warn("Pure python version of DP code can suffer underflows")
        # because it ignores 'exponents', the Pyrex version doesn't.
    source_states = list(range(len(T)))
    BEGIN = 0
    ERROR = len(T)
    (rows, exponents) = rows
    if use_logs:
        neutral_score = 0.0
        impossible = -numpy.inf
    else:
        neutral_score = 1.0
        impossible = 0.0
    best_score = impossible
    for i in range(i_low, i_high):
        x = x_index[i]
        i_sources = preds[0][i]
        current_row = rows[plan[i]]
        current_row[:, 0] = impossible
        if i == 0 and not local:
            current_row[0, 0] = neutral_score
        for j in range(j_low, j_high):
            y = y_index[j]
            j_sources = preds[1][j]
            for (state, bin, dx, dy) in state_directions:
                if local and dx and dy:
                    # Local alignments may start fresh at any match cell.
                    cumulative_score = T[BEGIN, state]
                    pointer = (dx, dy, BEGIN)
                else:
                    cumulative_score = impossible
                    pointer = (0, 0, ERROR)
                # dx/dy select whether this state consumes a position on each
                # axis; POG positions can have multiple predecessors.
                for (a, prev_i) in enumerate([[i], i_sources][dx]):
                    source_row = rows[plan[prev_i]]
                    for (b, prev_j) in enumerate([[j], j_sources][dy]):
                        source_posn = source_row[prev_j]
                        for prev_state in source_states:
                            prev_value = source_posn[prev_state]
                            transition = T[prev_state, state]
                            if viterbi:
                                if use_logs:
                                    candidate = prev_value + transition
                                else:
                                    candidate = prev_value * transition
                                if candidate > cumulative_score:
                                    cumulative_score = candidate
                                    pointer = (a + dx, b + dy, prev_state)
                            else:
                                # Forward algorithm: sum over predecessors.
                                # NOTE(review): this branch multiplies even when
                                # use_logs is set -- presumably Forward is only
                                # ever run without logs; confirm with callers.
                                cumulative_score += prev_value * transition
                if dx and dy:
                    d_score = match_scores[bin, x, y]
                elif dx:
                    d_score = xgap_scores[bin, x]
                elif dy:
                    d_score = ygap_scores[bin, y]
                else:
                    d_score = neutral_score
                if use_logs:
                    current_row[j, state] = cumulative_score + d_score
                else:
                    current_row[j, state] = cumulative_score * d_score
                if track is not None:
                    # Pack the winning pointer into the traceback matrix.
                    track[i, j, state] = (numpy.array(pointer) << track_enc).sum()
                if (i == i_high - 1 and j == j_high - 1 and not local) or (
                    local and dx and dy and current_row[j, state] > best_score
                ):
                    (best, best_score) = (((i, j), state), current_row[j, state])
    if not use_logs:
        best_score = numpy.log(best_score)
    return best + (best_score,)
class TrackBack(object):
    """An ordered list of (state, (x, y), (dx, dy)) viterbi traceback steps."""

    def __init__(self, tlist):
        self.tlist = tlist

    def __str__(self):
        # '.xym' encodes the move type: neither axis, x-only, y-only, match.
        return "".join(
            "(%s,%s)%s" % (x, y, ".xym"[dx + 2 * dy])
            for (state, (x, y), (dx, dy)) in self.tlist
        )

    def offset(self, X, Y):
        """Return a copy with every position shifted by (X, Y)."""
        tlist = [(state, (x + X, y + Y), dxy) for (state, (x, y), dxy) in self.tlist]
        return TrackBack(tlist)

    def __add__(self, other):
        # Concatenate two traceback segments.
        return TrackBack(self.tlist + other.tlist)

    def as_state_posn_tuples(self):
        return [(s, p) for (s, p, d) in self.tlist]

    def as_bin_pos_tuples(self, state_directions):
        """Map each step's state to its bin and each axis to a 0-based
        position (None where the axis was not advanced)."""
        bin_map = dict((state, bin) for (state, bin, dx, dy) in state_directions)
        result = []
        for (state, posn, (dx, dy)) in self.tlist:
            pos = [[None, i - 1][d] for (i, d) in zip(posn, [dx, dy])]
            result.append((bin_map.get(int(state), None), pos))
        return result


class Pair(object):
    """A pair of _Alignable children plus the machinery to run DP over them."""

    def __init__(self, alignable1, alignable2, backward=False):
        alignables = [alignable1, alignable2]
        assert alignable1.alphabet == alignable2.alphabet
        self.alphabet = alignable1.alphabet
        # Pick the fastest available aligner: POG-capable compiled module,
        # sequence-only compiled module, or the pure python fallback.
        for alignable in alignables:
            assert isinstance(alignable, _Alignable), type(alignable)
            if not isinstance(alignable, AlignableSeq):
                some_pogs = True
                break
        else:
            some_pogs = False
        if some_pogs and align_module is not None:
            aligner = align_module.calc_rows
        elif (not some_pogs) and seq_align_module is not None:
            aligner = seq_align_module.calc_rows
        else:
            aligner = py_calc_rows
        self.both_seqs = not some_pogs
        self.aligner = aligner
        if backward:
            alignables = [a.backward() for a in alignables]
        self.children = [alignable1, alignable2] = alignables
        self.max_preds = [alignable.max_preds for alignable in alignables]
        self.pointer_encoding = PointerEncoding(*self.max_preds)
        self.size = [len(alignable1), len(alignable2)]
        self.uniq_size = [len(alignable1.plh), len(alignable2.plh)]
        # Pre-marshalled contiguous arrays consumed by the aligners.
        self.plan = ascontiguousarray(
            alignable1.get_row_assignment_plan(), dtype="int64"
        )
        self.x_index = ascontiguousarray(alignable1.index, dtype="int64")
        self.y_index = ascontiguousarray(alignable2.index, dtype="int64")

    def get_seq_name_pairs(self):
        return [(a.leaf.edge_name, a.leaf.sequence) for a in self.children]

    def make_simple_emission_probs(self, mprobs, psubs1):
        # The second child gets identity substitution matrices.
        psubs2 = [numpy.identity(len(psub)) for psub in psubs1]
        bins = [PairBinData(mprobs, *ppsubs) for ppsubs in zip(psubs1, psubs2)]
        return PairEmissionProbs(self, bins)

    def make_emission_probs(self, bins):
        bins = [PairBinData(*args) for args in bins]
        return PairEmissionProbs(self, bins)

    def make_reversible_emission_probs(self, bins, length):
        bins = [BinData(*bin) for bin in bins]
        return ReversiblePairEmissionProbs(self, bins, length)

    def backward(self):
        # Same pair with both children reversed.
        return Pair(*self.children, **dict(backward=True))

    def __getitem__(self, index):
        assert len(index) == 2, index
        children = [
            child[dim_index] for (child, dim_index) in zip(self.children, index)
        ]
        return Pair(*children)

    def _decode_state(self, track, encoding, posn, pstate):
        """Unpack one traceback cell into (next position, (a, b), state)."""
        coded = int(track[posn[0], posn[1], pstate])
        (a, b, state) = encoding.decode(coded)
        if state >= track.shape[-1]:
            raise ArithmeticError("Error state in traceback")
        (x, y) = map(int, posn)
        a, b = map(int, [a, b])
        if state == -1:
            next = (x, y)
        else:
            # a/b index which predecessor was used; 0 means no move on that axis.
            if a:
                x = self.children[0][x][a - 1]
            if b:
                y = self.children[1][y][b - 1]
            next = numpy.array([x, y], int)
        return (next, (a, b), state)

    def traceback(self, track, encoding, posn, state, skip_last=False):
        """Follow packed pointers back from (posn, state) to the start."""
        result = []
        started = False
        while 1:
            (nposn, (a, b), nstate) = self._decode_state(track, encoding, posn, state)
            if state:
                result.append((state, posn, (a > 0, b > 0)))
            if started and state == 0:
                break
            (posn, state) = (nposn, nstate)
            started = True
        result.reverse()
        if skip_last:
            result.pop()
        return TrackBack(result)

    def edge2plh(self, edge, plhs):
        # Combine the children's partial likelihoods per bin, trimming the
        # sentinel first/last positions.
        bins = plhs[0].shape[0]
        plh = [
            edge.sum_input_likelihoods(*[p[bin][1:-1] for p in plhs])
            for bin in range(bins)
        ]
        return plh

    def get_pog(self, aligned_positions):
        (pog1, pog2) = [child.get_pog() for child in self.children]
        return pog1.traceback(pog2, aligned_positions)

    def get_pointer_encoding(self, n_states):
        # Reuse the encoding built in __init__, checking it can hold n_states.
        assert n_states <= self.pointer_encoding.max_states, (
            n_states,
            self.pointer_encoding.max_states,
        )
        return self.pointer_encoding

    def get_score_arrays_shape(self):
        needed = max(self.plan) + 1
        N = self.size[1]
        return (needed, N)

    def get_empty_score_arrays(self, n_states, dp_options):
        """Allocate (mantissa, exponent) row arrays for the DP recurrence."""
        shape = self.get_score_arrays_shape() + (n_states,)
        mantissas = numpy.zeros(shape, float)
        if dp_options.use_logs:
            mantissas[:] = numpy.log(0.0)
        if dp_options.use_scaling:
            exponents = numpy.ones(shape, int) * -10000
        else:
            exponents = None
        return (
            ascontiguousarray(mantissas, dtype="float64"),
            ascontiguousarray(exponents, dtype="int64"),
        )

    def calc_rows(
        self,
        i_low,
        i_high,
        j_low,
        j_high,
        state_directions,
        T,
        scores,
        rows,
        track,
        track_encoding,
        viterbi,
        **kw,
    ):
        """Marshal arguments into contiguous typed arrays and invoke the
        selected aligner over the [i_low, i_high) x [j_low, j_high) region."""
        (match_scores, (xscores, yscores)) = scores
        match_scores = ascontiguousarray(match_scores, dtype="float64")
        xscores = ascontiguousarray(xscores, dtype="float64")
        yscores = ascontiguousarray(yscores, dtype="float64")
        track_enc = ascontiguousarray(
            track_encoding and track_encoding.positions, dtype="int64"
        )
        (
            i_sources,
            i_sources_offsets,
            j_sources,
            j_sources_offsets,
        ) = _as_combined_arrays(self.children)
        state_directions = ascontiguousarray(state_directions, dtype="int64")
        T = ascontiguousarray(T, dtype="float64")
        track = ascontiguousarray(track, dtype="uint8")
        (mantissas, exponents) = rows
        mantissa = 0.0
        if kw["use_scaling"]:
            mantissa = numpy.log(0.0)
        return self.aligner(
            self.plan,
            self.x_index,
            self.y_index,
            i_low,
            i_high,
            j_low,
            j_high,
            i_sources,
            i_sources_offsets,
            j_sources,
            j_sources_offsets,
            state_directions,
            T,
            xscores,
            yscores,
            match_scores,
            mantissas,
            mantissa,
            exponents,
            track,
            track_enc,
            viterbi,
            **kw,
        )
from Tkinter import *
from Bio.SCOP import Node
from Bio import SCOP
from os import *
from shutil import copy
import re
import Pmw
import urllib
import parms
#from ScopFrame import ScopFrame
import MolecularSystem
from GUICards import *
from BlissMolecularViewer import *

#location of plusImage, minusImage
gifs_dir = "C:\\CourseWork\\CS499\\MyTest\\"
pdb_dir = "C:\\CourseWork\\CS499\\MyTest\\SCOP_Pdbs\\"

"""
This class represents each SCOP Node item in the treeWinText textbox.
Creates a frame with: 1.+/-button, 2.Empty Label for padding, and
3.Pointer to the actual Node in SCOP Data structure
KNOWN ISSUE: If the search returns a node of type 'CLASS' the '-' button for
the toplevel node does not function as expected. It works as expected for all
other types
"""
class ResultsScopNode(Frame):
    # One row of the results tree: an expand/contract button plus a label,
    # holding a reference to the underlying Bio.SCOP Node.
    def __init__(self, obj, id, parent=None, toplevel=None):
        Frame.__init__(self, parent, bg='white')
        self.pack()
        self.toplevel = toplevel
        self.thisNode = Node()
        self.thisNode = obj
        self.isDisplayed = 1
        self.isExpanded = 0
        self.isALeaf = 0
        self.id = id  #### EACH SCOP Struct. in the profile is assigned an ID
        self.gifs_dir = parms.get('gifs_dir')
        self.pdb_dir = parms.get('pdb_dir')
        self.createButton()
        self.createLabel()

    def createButton(self):
        self.plusImage = PhotoImage(file=self.gifs_dir+"plusnode.gif")
        self.minusImage = PhotoImage(file=self.gifs_dir+"minusnode.gif")
        #Create an empty Label to pad according to Hierarchy
        self.emptyLabel = Label(self, text='', bg='white')
        self.emptyLabel.pack(side=LEFT,
                             padx= 2 * self.toplevel.nameHierarchy[self.thisNode.type])
        if self.thisNode.children:
            self.button = Button(self, image=self.plusImage,
                                 command=(lambda node=self, id=self.id: self.toplevel.selectNode(node)))
        else:
            self.button = Button(self, image=self.minusImage,
                                 command=(lambda node=self, id=self.id: self.toplevel.selectNode(node)))
            self.isALeaf = 1
        self.button.pack(side=LEFT)
        self.button.bind('<Button-1>', self.singleClick)

    def createLabel(self):
        self.label = Label(self, text=' '+self.thisNode.description+' ', bg='white')
        self.label.pack(side=RIGHT)
        self.label.bind('<Double-1>', self.doubleClick)
        self.label.bind('<Button-1>', self.singleClick)

    def singleClick(self, event):
        # Selecting a row highlights it; the View button is only enabled for
        # leaf nodes (domains).
        self.toplevel.toggleSelection(self)
        if len(self.thisNode.children) == 0:
            self.toplevel.viewButton.config(state=NORMAL)
        else:
            self.toplevel.viewButton.config(state=DISABLED)
        self.displayLineage()

    #This function manages the lineageBox text box. Updates text box with the
    #lineage of the selected node.
    #If the ResultsNode's top level node is not of type 'CLASS' and when no
    #information is available from the SCOP structure, the file _hie.txt is
    #parsed for the toplevel lineage
    def displayLineage(self):
        type = {'cf': 'Fold', 'cl': 'Class', 'dm': 'Protein', 'fa': 'Family',
                'px': 'Domain', 'sf': 'Superfamily', 'sp': 'Species', '': ''}
        self.toplevel.lineageBox.config(state=NORMAL)
        self.toplevel.lineageBox.delete(1.0, END)
        currentNode = self.thisNode
        # Walk up the in-memory SCOP tree, inserting each ancestor at the top.
        while len(currentNode.type) > 0:
            self.toplevel.lineageBox.insert('1.0', type[currentNode.type]+": "+currentNode.description+'\n')
            indx = '1.'+str(len(type[currentNode.type]))
            currentNode = currentNode.parent
            self.toplevel.lineageBox.tag_add('label', '1.0', indx)
            self.toplevel.lineageBox.tag_config('label', font=('veranda', 8, 'bold'))
        #The list self.nodesLineage has the all the toplevel lineage. This list
        #is created when loading the SCOP Files
        for lines in self.toplevel.nodesLineage[self.id-1]:
            indx = '1.'+str(lines.find(':'))
            lines = lines[:lines.find('!&!')]
            self.toplevel.lineageBox.insert('1.0', lines+'\n')
            self.toplevel.lineageBox.tag_add('label', '1.0', indx)
            self.toplevel.lineageBox.tag_config('label', font=('veranda', 8, 'bold'))
        self.toplevel.lineageBox.insert('1.0', '\t\tLineage\n')
        self.toplevel.lineageBox.tag_add('label', '1.0', '2.0')
        self.toplevel.lineageBox.tag_config('label', font=('veranda', 8, 'bold'))
        self.toplevel.lineageBox.config(state=DISABLED)

    def doubleClick(self, event):
        self.toplevel.selectNode(self)

    def expand(self):
        self.button.config(image=self.minusImage)
        self.isExpanded = 1

    def contract(self):
        self.button.config(image=self.plusImage)
        self.isExpanded = 0

class ResultsFrame(Frame):
    #Arguments-
    # pth-This the profile name. If the profile is not saved, then pth points
    #     to the location where searchResults are stored
    # parent-This is the paned window object
    # toplevel-This is the actual Viewer window (Toplevel()).
Only needed to change the title of the window # viewer-This is the SCOP Domain Viewer object. Note that a SCOP Domain viewer object should be loaded # to load profiles def __init__(self, pth, parent=None, toplevel=None, viewer=None): Frame.__init__(self, bg='white') self.focus() self.scopViewer = viewer #The top pane in the viewer represents the SCOP Browser frame self.topFrame = parent.pane('top') #The bottom pane in the viewer represents the Molecular Viewer self.toplevel = parent.pane('bottom') self.viewer_window = toplevel self.dirpath = pth self.isCurrentProfileSaved = 0 self.currentProfileName = "" if self.dirpath.find(parms.get('profilesPath')) == 0 and self.dirpath.find(parms.get('saveResultsin')) != 0: pt = self.dirpath if pt[len(pt)-1] == '\\': self.currentProfileName = self.dirpath[self.dirpath[:-1].rindex('\\')+1:-1] else: self.currentProfileName = self.dirpath[self.dirpath.rindex('\\')+1:] self.isCurrentProfileSaved = 1 print "HERE++++++++++++"+self.currentProfileName+" -- "+self.dirpath self.nodesList = [] self.nodesNames = [] self.removedNames = [] self.nodesLineage = [] self.nodesScop = [] self.domainsScop = [] self.nameHierarchy = parms.get('nameHierarchy') if self.dirpath[len(self.dirpath)-1] != '\\': self.dirpath = self.dirpath + '\\' #Loads the profile/search results from the path given self.getAllNodes() print "Nodes SCOP: "+str(len(self.nodesScop)) self.balloon = Pmw.Balloon(self.topFrame) self.menuBar = Pmw.MenuBar(self.topFrame, hull_relief=RAISED, hull_borderwidth=1, balloon=self.balloon) self.menuBar.pack(fill=X, expand=NO, anchor=N) self.menuBar.addmenu('Profiles', 'Search Profiles') self.menuBar.addmenuitem('Profiles', 'command', command=self.saveCurrentProfile, label='Save Profile') self.menuBar.addmenuitem('Profiles', 'command', command=self.loadProfile, label='Load Profile') self.menuBar.addmenuitem('Profiles', 'command', command=self.editProfile, label='Edit Profile(s)') self.menuBar.addmenu('Edit', 'Edit Results') 
self.menuBar.addmenuitem('Edit', 'command', command=self.removeSelectedNode, label='Remove Node') self.menuBar.addmenu('SCOP', 'Search Entire SCOP Database') self.menuBar.addmenuitem('SCOP', 'command', label='View Selected Node in SCOP Domain Viewer', command=self.dispInScopDomViewer) self.treeWinText = Pmw.ScrolledText(self.topFrame, labelpos = 'n', usehullsize = 1, label_text='SCOP Search Results', hull_width = 100, hull_height = 20, text_padx = 1, text_pady = 1, text_wrap='none', text_cursor='arrow') self.treeWinText.pack(side=LEFT, expand=YES, fill=BOTH) self.pack(expand=YES, fill=BOTH) self.lineageBox = Text(self.topFrame, relief=FLAT, cursor = "arrow") self.lineageBox.pack(side=TOP, anchor=N) self.lineageBox.config(height=12, width=38, state=DISABLED) self.viewButton = Button(self.topFrame, text="View", state=DISABLED, height=1, command=self.displaySelected, width=5) self.viewButton.pack(side=LEFT, anchor=S, padx=10) self.scopNodeId = 0 self.lines = 1 #Load all the toplevel nodes (all of type 'Class') into the textbox for item in self.nodesScop: self.scopNodeId = self.scopNodeId + 1 self.nodesList.append(ResultsScopNode(item.root, self.scopNodeId, self.topFrame, self)) indx = "%0.1f"%(self.lines) self.treeWinText.window_create(indx, window=self.nodesList[len(self.nodesList)-1]) indx = "%0.1f"%(self.lines+0.1) self.treeWinText.insert(indx, '\n') self.lines = self.lines + 1 #self.currentSelection is updated everytime when a user clicks on a Node in the treeWinText text box self.currentSelection = self.nodesList[0] self.treeWinText.pack() self.treeWinText.configure(text_state='disabled') #When the user selects to 'View the Selected Domain in SCOP Viewer', the following function #builds the lineage list and calls expandToSelectedNode in the Viewer def dispInScopDomViewer(self): curNode = self.currentSelection.thisNode ling = [] if curNode.sunid == '0': print "Cannot Select Identification Node" else: ling.append(curNode.description+'!&!'+curNode.sunid) while 
curNode.parent: curNode = curNode.parent ling.append(curNode.description+'!&!'+curNode.sunid) ling.pop() for item in self.nodesLineage[self.currentSelection.id-1]: item = item[(item.find(':')+1):].strip() ling.append(item) self.scopViewer.expandToSelectedNode(ling) #This function initiates saving a node as a separate profile. def saveCurrentProfile(self): self.nameProfileDlg= Pmw.PromptDialog(self, title = 'Save Current Profile As...', label_text = 'Enter the Name of the Profile:', entryfield_labelpos = 'n', defaultbutton = 0, buttons = ('OK', 'Cancel'), command = self.validateName) #Check to see if a profile with the same name already exists in the Profiles Directory def validateName(self, result): if result is None or result == 'Cancel': self.nameProfileDlg.withdraw() elif result == 'OK': self.currentProfileName = self.nameProfileDlg.get() self.nameProfileDlg.withdraw() print self.currentProfileName+' IN get' if path.isdir(parms.get('profilesPath')+self.currentProfileName): dialog = Pmw.MessageDialog(self, title = 'Profile Name Rejected', defaultbutton = 0, buttons = ('OK', ), message_text = 'A Profile with name \''+self.currentProfileName+'\' already exists!') dialog.activate() self.currentProfileName = "" self.saveCurrentProfile() else: self.finishOffSaving() #After a valid profile name is selected, create SCOP Parseable Text files (_cla.txt, #_hie.txt and _des.txt) for the node. With these files a separate SCOP Structure can be #created with the node as a toplevel item. 
def finishOffSaving(self): print 'In save: '+self.currentProfileName if len(self.currentProfileName) > 0: #Create a profile directory, try: mkdir(parms.get('profilesPath')+self.currentProfileName) except OSError: dialog = Pmw.MessageDialog(self, title = 'Unable to Create Directory', defaultbutton = 0, buttons = ('OK', ), message_text = 'Unable to create the directory: '+parms.get('profilesPath')+self.currentProfileName) dialog.activate() dialog = Pmw.MessageDialog(self, title = 'Profile Not Saved', defaultbutton = 0, buttons = ('OK', ), message_text = 'Unable to Save Profile') dialog.activate() return #Copy all the results from the temperory directory to the actual profile directory files = listdir(parms.get('saveResultsin')) for name in self.nodesNames: if self.removedNames.count(self.nodesNames.index(name)+1) == 0 and files.count(name+'_hie.txt') > 0 and files.count(name+'_cla.txt') > 0 and files.count(name+'_des.txt') > 0: copy(parms.get('saveResultsin')+name+'_hie.txt', parms.get('profilesPath')+self.currentProfileName+'\\'+name+'_hie.txt') copy(parms.get('saveResultsin')+name+'_des.txt', parms.get('profilesPath')+self.currentProfileName+'\\'+name+'_des.txt') copy(parms.get('saveResultsin')+name+'_cla.txt', parms.get('profilesPath')+self.currentProfileName+'\\'+name+'_cla.txt') remove(parms.get('saveResultsin')+name+'_hie.txt') remove(parms.get('saveResultsin')+name+'_des.txt') remove(parms.get('saveResultsin')+name+'_cla.txt') else: print "Unable to save: "+name+" under the profile name: "+self.currentProfileName self.copyPDBs(parms.get('profilesPath')+self.currentProfileName) self.isCurrentProfileSaved = 1 self.treeWinText.configure(label_text = 'Current Profile: '+self.currentProfileName) #This function creates an independent database for the profile. 
#Creates folder structure based on the hierarchy #Copies the associated .ent files from the SCOP Database to current profile location def copyPDBs(self, savePath): #dirNamesDonotIncludelist = ['(', ')', ':', '-', '.', ',', ';'] for ScopItem in self.nodesScop: indx = self.nodesNames.index(ScopItem.root.description[len(ScopItem.root.description)-5:])+1 if self.removedNames.count(indx) == 0: pos = 0 #parse the scop structure for eachItem in self.domainsScop[indx-1]: hierarchy = [] curItem = eachItem """ This loop creates a list of directories need to save the domain Eg: If the node hierarchy is of the form Globin->Globin_child->Globin_child_child->Globin_child_child_child and 'Globin_child_child_child' is selected then the list hierarchy will contain [Globin, Globin_child, Globin_child_child, Globin_child_child_child] """ while curItem is not None: name = self.filterFoldername(curItem.description) #If the length of the node.description is > 20, the pad the end of it #with its sunid if len(name)>20: name = name[:20]+'_'+curItem.sunid hierarchy.append(name.strip()) curItem = curItem.parent curPath = savePath #From the 'hierarchy' list, from top down, each item is checked to see if the #folder exists by that name. If it doesnot exist, the folder structure is created. #If the folder by that name exists, then the next item in the 'heirarchy' list is checked hierarchy.pop() while len(hierarchy) > 0: foldr = hierarchy.pop() curPath = curPath+'\\'+foldr if not path.isdir(curPath): mkdir(curPath) while len(hierarchy) > 0: curPath = curPath + '\\' + hierarchy.pop() mkdir(curPath) scopPdbsLoc = parms.get('scopPDBsLocation') #Copy the .ent file from the SCOP Database to the current profile location if path.isdir(scopPdbsLoc+eachItem.sid[2:4]) and path.isfile(scopPdbsLoc+eachItem.sid[2:4]+'\\'+eachItem.sid+'.ent'): copy(scopPdbsLoc+eachItem.sid[2:4]+'\\'+eachItem.sid+'.ent', curPath+'\\'+eachItem.sid+'.ent') else: print "Protein Domain: "+eachItem.sid+" doesnot exist!" 
def loadProfile(self): files = listdir(parms.get('profilesPath')) profiles = [] for item in files: direc = parms.get('profilesPath')+item if path.isdir(direc): profiles.append(item) self.neww = Toplevel() dropdown = Pmw.ComboBox(self.neww, label_text = 'Select a Profile:', labelpos = 'nw', selectioncommand=self.loadSelectedProfile, scrolledlist_items = profiles) dropdown.pack(side = LEFT, anchor = N, fill = X, expand = 1, padx = 8, pady = 8) def loadSelectedProfile(self, selection): self.neww.destroy() new_window=Toplevel() pw = Pmw.PanedWidget(new_window) pw.add('top', min=10) pw.add('bottom', min = 10) new_window.title("SCOP Profile") new_window.config(bg='white') geometry_string = "%dx%d%+d%+d" %(700,800,50,30) # width,height,x-offset,y-offset new_window.geometry(geometry_string) pw.pane('top').configure(background='white') a = ResultsFrame(parms.get('profilesPath')+selection+'\\', pw, new_window, self.scopViewer).pack(side=TOP) pw.pack(expand=1, fill=BOTH) bottomPane = pw.pane('bottom') bottomPane.system = MolecularSystem.System(self, bottomPane) bottomPane.system.load_pdb("") bottomPane.system.color_ribbon_by_chain() bottomPane.viewer = MolnirMolecularViewer(bottomPane, bottomPane.system) bottomPane.viewer.loadSystem(bottomPane.system) bottomPane.cardframe = CardFrame(bottomPane, bottomPane.system) bottomPane.cardframe.pack(expand=NO, fill=X) self.scopViewer.topMenuSystem.parent.scopViewer_windows.append(new_window) self.topFrame.config(bg='white') #This function is not implemented. 
The idea is to build a GUI to help the user manage (rename, delete, etc) profiles def editProfile(self): print "Not Implemented" #Removes the selected node from the treeWinText text box and in self.nodesList #the removedNode acts just as a placeholder def removeSelectedNode(self): if self.currentSelection.thisNode.parent is None: self.removedNames.append(self.currentSelection.id) self.treeWinText.configure(text_state='normal') pos = 0 while self.nodesList[pos].id != self.currentSelection.id: pos = pos + 1 while len(self.nodesList) > pos and self.nodesList[pos].id == self.currentSelection.id: self.nodesList.pop(pos) indx = "%0.1f"%(pos+1) indx1 = "%0.1f"%(pos+2) self.treeWinText.delete(indx, indx1) self.treeWinText.configure(text_state='disabled') self.currentSelection = self.nodesList[0] #Given the path to the profile, this function loads all the SCOP Structures and #also creates a list with lineage lists for each SCOP Structure. def getAllNodes(self): files = listdir(self.dirpath) for item in files: if re.compile('\d\d\d\d\d_hie.txt').match(item): if path.isfile(self.dirpath+item[:5]+'_des.txt') and path.isfile(self.dirpath+item[:5]+'_cla.txt'): self.nodesNames.append(item[:5]) clasi = file(self.dirpath+item[:5]+'_cla.txt', 'r') descr = file(self.dirpath+item[:5]+'_des.txt', 'r') hiera = file(self.dirpath+item[:5]+'_hie.txt', 'r') self.nodesScop.append(SCOP.Scop(clasi, descr, hiera)) self.domainsScop.append(self.nodesScop[len(self.nodesScop)-1].getDomains()) descr.seek(0) self.nodesScop[len(self.nodesScop)-1].root.description = descr.readline().split('\t').pop().strip()+'_'+item[:5] hiera.seek(0) line = hiera.readline() lineage = [] while line[0] == '#': lineage.append(line[1:].strip()) line = hiera.readline() lineage.reverse() self.nodesLineage.append(lineage) #print self.nodesScop[len(self.nodesScop)-1].root.description #print
r[4][i] == -1: ant_str += '(' + 'ro_' + str(reaction_index) + '_' + str(reg) + ' + (1 - ' + 'ro_' \ + str(reaction_index) + '_' + str(reg) + ')/(1 + S' + str(reg) + '/kma_' \ + str(reaction_index) \ + '_' + str(reg) + '))^ma_' + str(reaction_index) + '_' + str(reg) + '*' if r[5][i] == 'a' and r[4][i] == 1: ant_str += '(' + 'ro_' + str(reaction_index) + '_' + str(reg) + ' + (1 - ' + 'ro_' \ + str(reaction_index) + '_' + str(reg) + ')*(S' + str(reg) + '/kma_' \ + str(reaction_index) \ + '_' + str(reg) + ')/(1 + S' + str(reg) + '/kma_' + str(reaction_index) \ + '_' + str(reg) + '))^ma_' + str(reaction_index) + '_' + str(reg) + '*' if r[5][i] == 'a': ma.add('ma_' + str(reaction_index) + '_' + str(reg)) kma.add('kma_' + str(reaction_index) + '_' + str(reg)) ro.add('ro_' + str(reaction_index) + '_' + str(reg)) ant_str = ant_str \ + '(kf_' + str(reaction_index) + '*(S' + str(r[1][0]) + '/km_' + str(reaction_index) \ + '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(' + 'S' \ + str(r[1][1]) + '/km_' + str(reaction_index) + '_' + str(r[1][1]) + ')^m_' \ + str(reaction_index) + '_' + str(r[1][1]) + ' - kr_' + str(reaction_index) + '*(S' \ + str(r[2][0]) + '/km_' + str(reaction_index) + '_' + str(r[2][0]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][0]) + '*(S' \ + str(r[2][1]) + '/km_' + str(reaction_index) + '_' + str(r[2][1]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][1]) + ')' if kinetics[0][8:10] == 'CM': if 's' in r[5]: ant_str += '/(((' else: ant_str += '/((' ant_str += '1 + S' + str(r[1][0]) + '/km_' + str(reaction_index) \ + '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(1 + S' \ + str(r[1][1]) + '/km_' + str(reaction_index) + '_' + str(r[1][1]) + ')^m_' \ + str(reaction_index) + '_' + str(r[1][1]) + ' + (1 + S' + str(r[2][0]) + '/km_' \ + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' + str(reaction_index) + '_' \ + str(r[2][0]) + '*(1 + S' + str(r[2][1]) + '/km_' + str(reaction_index) + '_' \ + 
str(r[2][1]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][1]) + ' - 1)' for i, reg in enumerate(r[3]): if r[5][i] == 's' and r[4][i] == -1: ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's' and r[4][i] == 1: ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's': ms.add('ms_' + str(reaction_index) + '_' + str(reg)) kms.add('kms_' + str(reaction_index) + '_' + str(reg)) if 's' in r[5]: ant_str += ')' + enzyme_end else: ant_str += enzyme_end if kinetics[0][8:10] == 'DM': if 's' in r[5]: ant_str += '/(((' else: ant_str += '/((' ant_str += 'S' + str(r[1][0]) + '/km_' + str(reaction_index) \ + '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(S' \ + str(r[1][1]) + '/km_' + str(reaction_index) + '_' + str(r[1][1]) + ')^m_' \ + str(reaction_index) + '_' + str(r[1][1]) + ' + (S' + str(r[2][0]) + '/km_' \ + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' + str(reaction_index) + '_' \ + str(r[2][0]) + '*(S' + str(r[2][1]) + '/km_' + str(reaction_index) + '_' + str(r[2][1]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][1]) + ' + 1)' for i, reg in enumerate(r[3]): if r[5][i] == 's' and r[4][i] == -1: ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's' and r[4][i] == 1: ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's': ms.add('ms_' + str(reaction_index) + '_' + str(reg)) kms.add('kms_' + str(reaction_index) + '_' + str(reg)) if 's' in r[5]: ant_str += ')' + enzyme_end else: ant_str += enzyme_end if kinetics[0][8:10] == 'SM': if 's' in r[5]: ant_str += '/(((' else: ant_str += '/((' ant_str += '1 + S' + str(r[1][0]) + '/km_' + str(reaction_index) \ + '_' + 
str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(1 + S' \ + str(r[1][1]) + '/km_' + str(reaction_index) + '_' + str(r[1][1]) + ')^m_' \ + str(reaction_index) + '_' + str(r[1][1]) + '*(1 + S' + str(r[2][0]) + '/km_' \ + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' + str(reaction_index) + '_' \ + str(r[2][0]) + '*(1 + S' + str(r[2][1]) + '/km_' + str(reaction_index) + '_' \ + str(r[2][1]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][1]) + ')' for i, reg in enumerate(r[3]): if r[5][i] == 's' and r[4][i] == -1: ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's' and r[4][i] == 1: ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's': ms.add('ms_' + str(reaction_index) + '_' + str(reg)) kms.add('kms_' + str(reaction_index) + '_' + str(reg)) if 's' in r[5]: ant_str += ')' + enzyme_end else: ant_str += enzyme_end if kinetics[0][8:10] == 'FM': if 's' in r[5]: ant_str += '/(((' else: ant_str += '/((' ant_str += 'S' + str(r[1][0]) + '/km_' + str(reaction_index) \ + '_' + str(r[1][0]) + ')^m_' + str(reaction_index) + '_' + str(r[1][0]) + '*(S' \ + str(r[1][1]) + '/km_' + str(reaction_index) + '_' + str(r[1][1]) + ')^m_' \ + str(reaction_index) + '_' + str(r[1][1]) + '*(S' + str(r[2][0]) + '/km_' \ + str(reaction_index) + '_' + str(r[2][0]) + ')^m_' + str(reaction_index) + '_' \ + str(r[2][0]) + '*(S' + str(r[2][1]) + '/km_' + str(reaction_index) + '_' + str(r[2][1]) \ + ')^m_' + str(reaction_index) + '_' + str(r[2][1]) + ')^(1/2)' for i, reg in enumerate(r[3]): if r[5][i] == 's' and r[4][i] == -1: ant_str += ' + (S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's' and r[4][i] == 1: ant_str += ' + (kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \ + ')^ms_' + 
str(reaction_index) + '_' + str(reg) if r[5][i] == 's': ms.add('ms_' + str(reaction_index) + '_' + str(reg)) kms.add('kms_' + str(reaction_index) + '_' + str(reg)) if 's' in r[5]: ant_str += ')' + enzyme_end else: ant_str += enzyme_end if kinetics[0][8:10] == 'PM': num_s = r[5].count('s') if 's' in r[5]: ant_str += '/(' for i, reg in enumerate(r[3]): if r[5][i] == 's' and r[4][i] == -1: ant_str += '(S' + str(reg) + '/kms_' + str(reaction_index) + '_' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if r[5][i] == 's' and r[4][i] == 1: ant_str += '(kms_' + str(reaction_index) + '_' + str(reg) + '/S' + str(reg) \ + ')^ms_' + str(reaction_index) + '_' + str(reg) if (i + 1) < num_s: ant_str += ' + ' if r[5][i] == 's': ms.add('ms_' + str(reaction_index) + '_' + str(reg)) kms.add('kms_' + str(reaction_index) + '_' + str(reg)) if 's' in r[5]: ant_str += ')' + enzyme_end km.add('km_' + str(reaction_index) + '_' + str(r[1][0])) km.add('km_' + str(reaction_index) + '_' + str(r[1][1])) km.add('km_' + str(reaction_index) + '_' + str(r[2][0])) km.add('km_' + str(reaction_index) + '_' + str(r[2][1])) m.add('m_' + str(reaction_index) + '_' + str(r[1][0])) m.add('m_' + str(reaction_index) + '_' + str(r[1][1])) m.add('m_' + str(reaction_index) + '_' + str(r[2][0])) m.add('m_' + str(reaction_index) + '_' + str(r[2][1])) kf.add('kf_' + str(reaction_index)) kr.add('kr_' + str(reaction_index)) ant_str
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
import os
import sys
import requests
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
from quantrocket.cli.utils.stream import to_bytes
from quantrocket.cli.utils.files import write_response_to_filepath_or_buffer
from quantrocket.cli.utils.parse import dict_strs_to_dict, dict_to_dict_strs
from quantrocket.exceptions import NoHistoricalData

# Scratch directory for temporary files; overridable via the environment.
TMP_DIR = os.environ.get("QUANTROCKET_TMP_DIR", "/tmp")

def create_edi_db(code, exchanges):
    """
    Create a new database for collecting historical data from EDI.

    Parameters
    ----------
    code : str, required
        the code to assign to the database (lowercase alphanumerics and hyphens only)

    exchanges : list of str, required
        one or more exchange codes (MICs) which should be collected

    Returns
    -------
    dict
        status message
    """
    params = {
        "vendor": "edi",
        "exchanges": exchanges
    }
    # PUT creates (or defines) the database config on the houston gateway.
    response = houston.put("/history/databases/{0}".format(code), params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_create_edi_db(*args, **kwargs):
    # Thin CLI adapter: converts the JSON response to CLI-friendly output.
    return json_to_cli(create_edi_db, *args, **kwargs)

def create_ibkr_db(code, universes=None, sids=None, start_date=None, end_date=None,
                   bar_size=None, bar_type=None, outside_rth=False,
                   primary_exchange=False, times=None, between_times=None,
                   shard=None):
    """
    Create a new database for collecting historical data from Interactive
    Brokers.

    The historical data requirements you specify when you create a new
    database (bar size, universes, etc.) are applied each time you collect
    data for that database.

    Parameters
    ----------
    code : str, required
        the code to assign to the database (lowercase alphanumerics and hyphens only)

    universes : list of str
        include these universes

    sids : list of str
        include these sids

    start_date : str (YYYY-MM-DD), optional
        collect history back to this start date (default is to collect as far back
        as data is available)

    end_date : str (YYYY-MM-DD), optional
        collect history up to this end date (default is to collect up to the present)

    bar_size : str, required
        the bar size to collect. Possible choices:
        "1 secs", "5 secs", "10 secs", "15 secs", "30 secs",
        "1 min", "2 mins", "3 mins", "5 mins", "10 mins", "15 mins", "20 mins", "30 mins",
        "1 hour", "2 hours", "3 hours", "4 hours", "8 hours",
        "1 day",
        "1 week",
        "1 month"

    bar_type : str, optional
        the bar type to collect (if not specified, defaults to MIDPOINT for FX and
        TRADES for everything else). Possible choices:
        "TRADES",
        "ADJUSTED_LAST",
        "MIDPOINT",
        "BID",
        "ASK",
        "BID_ASK",
        "HISTORICAL_VOLATILITY",
        "OPTION_IMPLIED_VOLATILITY"

    outside_rth : bool
        include data from outside regular trading hours (default is to limit to regular
        trading hours)

    primary_exchange : bool
        limit to data from the primary exchange (default False)

    times : list of str (HH:MM:SS), optional
        limit to these times (refers to the bar's start time; mutually exclusive
        with `between_times`)

    between_times : list of str (HH:MM:SS), optional
        limit to times between these two times (refers to the bar's start time;
        mutually exclusive with `times`)

    shard : str, optional
        whether and how to shard the database, i.e. break it into smaller pieces.
        Required for intraday databases. Possible choices are `year` (separate
        database for each year), `month` (separate database for each year+month),
        `day` (separate database for each day), `time` (separate database for each
        bar time), `sid` (separate database for each security), `sid,time`
        (duplicate copies of database, one sharded by sid and the other by time),
        or `off` (no sharding). See http://qrok.it/h/shard for more help.

    Returns
    -------
    dict
        status message
    """
    # Only send parameters the caller actually provided; the service applies
    # its own defaults for everything omitted.
    params = {}
    if universes:
        params["universes"] = universes
    if sids:
        params["sids"] = sids
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if bar_size:
        params["bar_size"] = bar_size
    if bar_type:
        params["bar_type"] = bar_type
    if outside_rth:
        params["outside_rth"] = outside_rth
    if primary_exchange:
        params["primary_exchange"] = primary_exchange
    if times:
        params["times"] = times
    if between_times:
        params["between_times"] = between_times
    if shard:
        params["shard"] = shard
    params["vendor"] = "ibkr"
    response = houston.put("/history/databases/{0}".format(code), params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_create_ibkr_db(*args, **kwargs):
    return json_to_cli(create_ibkr_db, *args, **kwargs)

def create_sharadar_db(code, sec_type, country="US"):
    """
    Create a new database for collecting historical data from Sharadar.

    Parameters
    ----------
    code : str, required
        the code to assign to the database (lowercase alphanumerics and hyphens only)

    sec_type : str, required
        the security type to collect. Possible choices: STK, ETF

    country : str, required
        country to collect data for. Possible choices: US, FREE

    Returns
    -------
    dict
        status message
    """
    params = {"vendor": "sharadar"}
    if sec_type:
        params["sec_type"] = sec_type
    if country:
        params["country"] = country
    response = houston.put("/history/databases/{0}".format(code), params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_create_sharadar_db(*args, **kwargs):
    return json_to_cli(create_sharadar_db, *args, **kwargs)

def create_usstock_db(code, bar_size=None, free=False, universe=None):
    """
    Create a new database for collecting historical US stock data from
    QuantRocket.

    Parameters
    ----------
    code : str, required
        the code to assign to the database (lowercase alphanumerics and hyphens only)

    bar_size : str, optional
        the bar size to collect. Possible choices: 1 day

    free : bool
        limit to free sample data. Default is to collect the full dataset.

    universe : str, optional
        [DEPRECATED] whether to collect free sample data or the full dataset.
        This parameter is deprecated and will be removed in a future release.
        Please use free=True to request free sample data or free=False (or omit
        the free parameter) to request the full dataset.

    Returns
    -------
    dict
        status message

    Examples
    --------
    Create a database for end-of-day US stock prices:

    create_usstock_db('usstock-1d')
    """
    params = {
        "vendor": "usstock",
    }
    if bar_size:
        params["bar_size"] = bar_size
    if free:
        params["free"] = free
    if universe:
        # Deprecated path: still honored, but warn the caller loudly.
        import warnings
        # DeprecationWarning is ignored by default but we want the user
        # to see it
        warnings.simplefilter("always", DeprecationWarning)
        warnings.warn(
            "the `universe` parameter is deprecated and will be removed in a "
            "future release, please use `free=True` to request free sample data "
            "or free=False (or omit the free parameter) to request the full dataset",
            DeprecationWarning)
        params["universe"] = universe

    response = houston.put("/history/databases/{0}".format(code), params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_create_usstock_db(*args, **kwargs):
    return json_to_cli(create_usstock_db, *args, **kwargs)

def create_custom_db(code, bar_size=None, columns=None):
    """
    Create a new database into which custom data can be loaded.

    Parameters
    ----------
    code : str, required
        the code to assign to the database (lowercase alphanumerics and hyphens only)

    bar_size : str, required
        the bar size that will be loaded. This isn't enforced but facilitates
        efficient querying and provides a hint to other parts of the API. Use a
        Pandas timedelta string, for example, '1 day' or '1 min' or '1 sec'.

    columns : dict of column name:type, required
        the columns to create, specified as a Python dictionary mapping column names
        to column types. For example, {"Close":"float", "Volume":"int"}. Valid column
        types are "int", "float", "str", "date", and "datetime". Column names must
        start with a letter and include only letters, numbers, and underscores. Sid
        and Date columns are automatically created and need not be specified. For
        boolean columns, choose type 'int' and store 1 or 0.

    Returns
    -------
    dict
        status message

    Examples
    --------
    Create a custom database for loading fundamental data:

    >>> create_custom_db(
            "custom-fundamentals",
            bar_size="1 day",
            columns={
                "Revenue":"int",
                "EPS":"float",
                "Currency":"str",
                "TotalAssets":"int"})

    Create a custom database for loading intraday OHCLV data:

    >>> create_custom_db(
            "custom-stk-1sec",
            bar_size="1 sec",
            columns={
                "Open":"float",
                "High":"float",
                "Low":"float",
                "Close":"float",
                "Volume":"int"})
    """
    params = {}
    if bar_size:
        params["bar_size"] = bar_size
    if columns:
        # The HTTP layer expects "name:type" strings rather than a dict.
        params["columns"] = dict_to_dict_strs(columns)
    params["vendor"] = "custom"
    response = houston.put("/history/databases/{0}".format(code), params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_create_custom_db(*args, **kwargs):
    columns = kwargs.get("columns", None)
    if columns:
        # CLI passes columns as a list of "name:type" strings; convert to dict.
        kwargs["columns"] = dict_strs_to_dict(*columns)
    return json_to_cli(create_custom_db, *args, **kwargs)

def get_db_config(code):
    """
    Return the configuration for a history database.

    Parameters
    ----------
    code : str, required
        the database code

    Returns
    -------
    dict
        config
    """
    response = houston.get("/history/databases/{0}".format(code))
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_get_db_config(*args, **kwargs):
    return json_to_cli(get_db_config, *args, **kwargs)

def drop_db(code, confirm_by_typing_db_code_again=None):
    """
    Delete a history database.

    Deleting a history database deletes its configuration and data and is
    irreversible.

    Parameters
    ----------
    code : str, required
        the database code

    confirm_by_typing_db_code_again : str, required
       enter the db code again to confirm you want to drop the database, its config,
       and all its data

    Returns
    -------
    dict
        status message
    """
    # The repeated code acts as a confirmation token; the server validates it.
    params = {"confirm_by_typing_db_code_again": confirm_by_typing_db_code_again}
    response = houston.delete("/history/databases/{0}".format(code), params=params)
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_drop_db(*args, **kwargs):
    return json_to_cli(drop_db, *args, **kwargs)

# NOTE(review): the source is truncated here mid-definition; the following
# `def` has no name or body in view.
def
<gh_stars>10-100
from abc import ABC, abstractmethod
import inspect
import re
import textwrap
from typing import Generator, Set

from clang.cindex import Cursor, CursorKind, TokenKind, TypeKind

from .types import *
from .registry import xr_registry


class SkippableCodeItemException(Exception):
    # Raised by item constructors to signal "skip this cursor entirely"
    # (e.g. header guards, multi-token macros) rather than generate code.
    pass


class CodeItem(ABC):
    """Base class for one generatable code item parsed from a libclang cursor.

    Subclasses render themselves for three targets (see Api): the original C
    API, the ctypes binding layer, and the pythonic wrapper.
    """

    def __init__(self, cursor: Cursor) -> None:
        self.cursor = cursor

    @staticmethod
    def blank_lines_before() -> int:
        # Number of blank lines the emitter should place before this item.
        return 1

    @staticmethod
    def blank_lines_after() -> int:
        # Number of blank lines the emitter should place after this item.
        return 1

    @abstractmethod
    def name(self, api=Api.PYTHON) -> str:
        pass

    @abstractmethod
    def code(self, api=Api.PYTHON) -> str:
        pass

    @abstractmethod
    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        pass


class DefinitionItem(CodeItem):
    """A simple `#define NAME VALUE` macro, rendered as a module constant."""

    def __init__(self, cursor: Cursor) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.MACRO_DEFINITION
        self._capi_name = cursor.spelling
        if self._capi_name.endswith("_"):
            raise SkippableCodeItemException  # OPENVR_H_
        # First token is the macro name itself; the rest is the value.
        tokens = list(cursor.get_tokens())[1:]
        if len(tokens) > 1:
            raise SkippableCodeItemException  # We only want simple #define values
        self.c_value = tokens[0].spelling
        self.value = self.c_value
        if self.value is None:
            raise SkippableCodeItemException  # #define with no value
        assert self._capi_name.startswith("XR_")
        self._py_name = self._capi_name[3:]
        # Strip C long-long suffix and the XR_ prefix from the value.
        if self.value.endswith("LL"):
            self.value = self.value[:-2]
        if self.value.startswith("XR_"):
            self.value = self.value[3:]

    @staticmethod
    def blank_lines_before():
        return 0

    @staticmethod
    def blank_lines_after():
        return 0

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._capi_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == Api.C:
            return f"#define {self.name(api)} {self.c_value}"
        return f"{self.name(api)} = {self.value}"

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return set()


class EnumItem(CodeItem):
    """A C enum declaration and all of its constants."""

    def __init__(self, cursor: Cursor) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.ENUM_DECL
        self._capi_name = cursor.spelling
        self._py_name = py_type_name(self._capi_name)
        self.values = []
        for v in cursor.get_children():
            assert v.kind == CursorKind.ENUM_CONSTANT_DECL
            self.values.append(EnumValueItem(cursor=v, parent=self))

    @staticmethod
    def blank_lines_before():
        return 2

    @staticmethod
    def blank_lines_after():
        return 1

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._py_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == api.CTYPES:
            # ctypes layer: enums are plain c_int plus module-level constants.
            result = f"{self.name(api)} = c_int"
            for v in self.values:
                result += f"\n{v.code(api)}"
            return result
        elif api == api.PYTHON:
            result = f"class {self.name(api)}(EnumBase):"
            value_count = 0
            for v in self.values:
                # The sentinel *_MAX_ENUM value is omitted from the Python enum.
                if v.name(api) == "_MAX_ENUM":
                    continue
                result += v.code(api)
                value_count += 1
            if value_count < 1:
                result += "\n pass"
            return result
        elif api == api.C:
            # TODO: this is probably not tested...
            result = f"{self.name(api)} {{"  # Q: does this already have "enum" at the beginning?
            for v in self.values:
                result += f" \n{v.code(api)}"
            result += "\n}"
            return result

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return {
            "c_int",
        }


class EnumValueItem(CodeItem):
    """A single enum constant, with name translation between C and Python."""

    # Certain enums name their values differently than others
    _PREFIX_TABLE = {
        "RESULT_": "",
        "STRUCTURE_TYPE_": "TYPE_",
        "PERF_SETTINGS_NOTIFICATION_LEVEL_": "PERF_SETTINGS_NOTIF_LEVEL_",
    }

    def __init__(self, cursor: Cursor, parent: EnumItem) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.ENUM_CONSTANT_DECL
        self.parent = parent
        self._capi_name = cursor.spelling
        self._py_name = self._make_py_name()
        self.value = self.cursor.enum_value

    def _make_py_name(self):
        # Compute pythonic name...
        # Strip the "XR_" prefix, the parent-enum-derived prefix, and any
        # vendor postfix (EXT/FB/...), since the Python enum class name
        # already carries that information.
        n = self._capi_name
        assert n.startswith("XR_")
        n = n[3:]  # Strip off initial "XR_" prefix
        prefix = self.parent.name(Api.PYTHON)
        postfix = ""
        for postfix1 in ["EXT", "FB", "HTC", "KHR", "MSFT"]:
            if prefix.endswith(postfix1):
                prefix = prefix[: -len(postfix1)]
                postfix = f"_{postfix1}"
                break
        prefix = snake_from_camel(prefix).upper() + "_"
        if n == f"{prefix}MAX_ENUM{postfix}":
            return f"_MAX_ENUM"  # private enum value
        if prefix in self._PREFIX_TABLE:
            prefix = self._PREFIX_TABLE[prefix]
        if not n.startswith(prefix):
            assert(False)
        n = n[len(prefix):]
        if len(postfix) > 0:
            n = n[: -len(postfix)]  # It's already in the parent enum name
        return n

    @staticmethod
    def blank_lines_before():
        return 0

    @staticmethod
    def blank_lines_after():
        return 0

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._capi_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        line_end = ""
        line_indent = " "
        if api == Api.C:
            line_end = ","  # TODO: but not the last one, right?
        elif api == Api.CTYPES:
            line_indent = ""
        return f"\n{line_indent}{self.name(api)} = {self.value}{line_end}"

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return {
            "c_int",
        }


class FlagsItem(CodeItem):
    """An XrFlags typedef (e.g. XrSwapchainUsageFlagsEXT) and its bit values."""

    def __init__(self, cursor: Cursor) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.TYPEDEF_DECL
        self._capi_name = cursor.spelling
        self._py_name = py_type_name(self._capi_name)
        # Split "Xr<Core>Flags<Vendor>" into its core name and vendor tag.
        match = re.match(r"^Xr(\S+)Flags(\S*)$", self._capi_name)
        assert match
        self.core_name = match.group(1)
        self.vendor = match.group(2)
        self.value_prefix = snake_from_camel(self.core_name).upper() + "_"
        self.values = []

    def add_value(self, cursor: Cursor) -> None:
        # Flag bit values arrive as separate VAR_DECL cursors and are
        # attached to their flags typedef here.
        assert cursor.kind == CursorKind.VAR_DECL
        item = VariableItem(cursor)
        assert item.name().startswith(self.value_prefix)
        item_name = item.name()[len(self.value_prefix):]
        if len(self.vendor) > 0:
            assert item_name.endswith("_" + self.vendor)
            item_name = item_name[:-len(self.vendor) - 1]
        self.values.append([item_name, item.value])

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._py_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == api.CTYPES:
            raise NotImplementedError
        elif api == api.PYTHON:
            result = f"class {self.name(api)}(FlagBase):\n NONE = 0x00000000"
            for name, value in self.values:
                result += f"\n {name} = {value}"
            return result
        elif api == api.C:
            raise NotImplementedError

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return set()


class FunctionItem(CodeItem):
    """A C function declaration, rendered as a ctypes binding or a Python wrapper."""

    def __init__(self, cursor: Cursor) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.FUNCTION_DECL
        self._capi_name = cursor.spelling
        self._py_name = self._py_function_name(self._capi_name)
        self.parameters = []
        self.return_type = None
        for c in cursor.get_children():
            if c.kind == CursorKind.TYPE_REF:
                # The (single) TYPE_REF child is the return type.
                assert self.return_type is None
                self.return_type = parse_type(c.type)
            elif c.kind == CursorKind.PARM_DECL:
                self.parameters.append(FunctionParameterItem(c))
            else:
                assert False

    @staticmethod
    def _py_function_name(capi_name: str) -> str:
        # xrFooBar -> foo_bar
        s = capi_name
        if s.startswith("xr"):
            s = s[2:]
        return snake_from_camel(s)

    @staticmethod
    def blank_lines_before():
        return 2

    @staticmethod
    def blank_lines_after():
        return 2

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._capi_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == Api.CTYPES:
            # ctypes raw function definition
            result = inspect.cleandoc(f"""
                {self.name(Api.C)} = openxr_loader_library.{self.name(Api.C)}
                {self.name(Api.C)}.restype = {self.return_type.name(Api.PYTHON)}
                {self.name(Api.C)}.argtypes = [
            """)
            for p in self.parameters:
                result += f"\n {p.type.name(api)}, # {p.name(Api.PYTHON)}"
            result += "\n]"
            return result
        elif api == Api.PYTHON:
            return str(FunctionCoder(self))
        elif api == Api.C:
            raise NotImplementedError

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        result = self.return_type.used_ctypes(api)
        for p in self.parameters:
            result.update(p.used_ctypes(api))
        return result


class FunctionParameterItem(CodeItem):
    """One parameter of a FunctionItem, with optionality from the XR registry."""

    def __init__(self, cursor: Cursor):
        super().__init__(cursor)
        assert cursor.kind == CursorKind.PARM_DECL
        self._capi_name = cursor.spelling
        self._py_name = snake_from_camel(self._capi_name)
        self.type = parse_type(cursor.type)
        self._optional = False
        # Query xr registry to see if this parameter is optional
        if xr_registry:
            function_c_name = cursor.semantic_parent.spelling
            try:
                command = xr_registry.find(f'commands/command/proto[name="{function_c_name}"]/..')
                this_param = command.find(f'param[name="{self._capi_name}"]')
                self._optional = this_param.attrib["optional"] == "true"
            except Exception:
                # Best-effort lookup: missing registry entries leave the
                # parameter marked non-optional.
                pass

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._capi_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        pass

    @staticmethod
    def default_value() -> str:
        """Only applies if is_optional() is True"""
        return "None"

    def is_optional(self) -> bool:
        return self._optional

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return self.type.used_ctypes(api)


class StructFieldItem(CodeItem):
    """One field of a StructItem, rendered as a ctypes _fields_ entry."""

    def __init__(self, cursor: Cursor) -> None:
        super().__init__(cursor)
        assert cursor.kind == CursorKind.FIELD_DECL
        self._capi_name = cursor.spelling
        self._py_name = snake_from_camel(self._capi_name)
        # NOTE(review): dead code — the `if False and ...` guard disables this
        # platform-type remapping path; presumably kept for debugging.
        if False and self.cursor.type.kind == TypeKind.INT:
            possible_type = tuple(self.cursor.get_tokens())[0].spelling
            if possible_type in PlatformType.type_map:
                self._py_name = PlatformType.type_map[possible_type]
        self.type = parse_type(cursor.type)

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._py_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == Api.C:
            raise NotImplementedError
        return f'\n ("{self.name(api)}", {self.type.name(api)}),'

    def used_ctypes(self, api=Api.PYTHON) -> Set[str]:
        return self.type.used_ctypes(api)


class StructItem(CodeItem):
    """A C struct declaration, rendered as a ctypes.Structure subclass."""

    def __init__(self, cursor: Cursor):
        super().__init__(cursor)
        assert cursor.kind == CursorKind.STRUCT_DECL
        self.c_name = cursor.spelling
        self._capi_name = capi_type_name(self.c_name)
        self._py_name = py_type_name(self._capi_name)
        self.fields = []
        for c in cursor.get_children():
            if c.kind == CursorKind.FIELD_DECL:
                self.fields.append(StructFieldItem(c))
            elif c.kind == CursorKind.UNEXPOSED_ATTR:
                pass  # something about the typedef?
            elif c.kind == CursorKind.STRUCT_DECL:
                pass  # Probably just a structure pointer, right?
            else:
                assert False
        # A struct is recursive if any field's ctypes type mentions the
        # struct's own name (e.g. a next-pointer chain).
        self.is_recursive = False
        for f in self.fields:
            m = re.search(fr"\b{self.name(Api.CTYPES)}\b", f.type.name(Api.CTYPES))
            if m:
                self.is_recursive = True

    @staticmethod
    def blank_lines_before():
        return 2

    @staticmethod
    def blank_lines_after():
        return 2

    def name(self, api=Api.PYTHON) -> str:
        if api == api.PYTHON:
            return self._py_name
        elif api == api.C:
            return self._capi_name
        elif api == api.CTYPES:
            return self._py_name
        else:
            raise NotImplementedError

    def code(self, api=Api.PYTHON) -> str:
        if api == Api.C:
            raise NotImplementedError
        result = f"class {self.name(api)}(Structure):"
        if len(self.fields) == 0:
            # Empty structure
            result += "\n pass"
            return result
        structure_coder = StructureCoder(self)
        result += structure_coder.generate_constructor()
        result += "\n"
        # Add special container methods for structures whose fields are all floats
        float_count = 0
        for field in self.fields:
            if field.type.name(Api.CTYPES) == "c_float":
                float_count += 1
        # NOTE(review): source is truncated here mid-method; the body of this
        # condition is not in view.
        if float_count > 1 and float_count == len(self.fields):
# Jython emulation of the CPython `ssl` module on top of Java SSL / Netty.
# NOTE: this is Python 2 code (long literal `0x1000000L` below); it targets
# the Jython runtime, not CPython 3.
import base64
from collections import namedtuple
import errno
from java.security.cert import CertificateFactory
import uuid

from java.io import BufferedInputStream
from java.security import KeyStore, KeyStoreException
from java.security.cert import CertificateParsingException
from javax.net.ssl import TrustManagerFactory
from javax.naming.ldap import LdapName
from java.lang import IllegalArgumentException, System

import logging
import os
import textwrap
import time
import re
import threading

try:
    # jarjar-ed version
    from org.python.netty.channel import ChannelInitializer
    from org.python.netty.handler.ssl import SslHandler
except ImportError:
    # dev version from extlibs
    from io.netty.channel import ChannelInitializer
    from io.netty.handler.ssl import SslHandler

from _socket import (
    SSLError, raises_java_exception,
    SSL_ERROR_SSL, SSL_ERROR_WANT_READ, SSL_ERROR_WANT_WRITE,
    SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_SYSCALL, SSL_ERROR_ZERO_RETURN,
    SSL_ERROR_WANT_CONNECT, SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE,
    SOL_SOCKET, SO_TYPE, SOCK_STREAM,
    socket, _socketobject, ChildSocket,
    error as socket_error)
from _sslcerts import _get_openssl_key_manager, _extract_cert_from_data, _extract_certs_for_paths, \
    NoVerifyX509TrustManager, _str_hash_key_entry, _get_ecdh_parameter_spec, CompositeX509TrustManager
from _sslcerts import SSLContext as _JavaSSLContext

from java.text import SimpleDateFormat
from java.util import ArrayList, Locale, TimeZone, NoSuchElementException
from java.util.concurrent import CountDownLatch
from javax.naming.ldap import LdapName
from javax.net.ssl import SSLException, SSLHandshakeException
from javax.security.auth.x500 import X500Principal

from org.ietf.jgss import Oid

log = logging.getLogger("_socket")

# Pretend to be OpenSSL
OPENSSL_VERSION = "OpenSSL 1.0.0 (as emulated by Java SSL)"
OPENSSL_VERSION_NUMBER = 0x1000000L
OPENSSL_VERSION_INFO = (1, 0, 0, 0, 0)
_OPENSSL_API_VERSION = OPENSSL_VERSION_INFO

# Certificate-verification modes, mirroring CPython's ssl module constants.
CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED = range(3)

# Do not support PROTOCOL_SSLv2, it is highly insecure and it is optional
_, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2 = range(6)
_PROTOCOL_NAMES = {
    PROTOCOL_SSLv3: 'SSLv3',
    PROTOCOL_SSLv23: 'SSLv23',
    PROTOCOL_TLSv1: 'TLSv1',
    PROTOCOL_TLSv1_1: 'TLSv1.1',
    PROTOCOL_TLSv1_2: 'TLSv1.2'
}

OP_ALL, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_TLSv1 = range(4)
OP_SINGLE_DH_USE, OP_NO_COMPRESSION, OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_ECDH_USE = 1048576, 131072, 4194304, 524288
VERIFY_DEFAULT, VERIFY_CRL_CHECK_LEAF, VERIFY_CRL_CHECK_CHAIN, VERIFY_X509_STRICT = 0, 4, 12, 32

CHANNEL_BINDING_TYPES = []

# https://docs.python.org/2/library/ssl.html#ssl.HAS_ALPN etc...
HAS_ALPN, HAS_NPN, HAS_ECDH, HAS_SNI = False, False, True, False

# TODO not supported on jython yet
# Disable weak or insecure ciphers by default
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
#   * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
#   * Prefer ECDHE over DHE for better performance
#   * Prefer any AES-GCM over any AES-CBC for better performance and security
#   * Then Use HIGH cipher suites as a fallback
#   * Then Use 3DES as fallback which is secure but slow
#   * Disable NULL authentication, NULL encryption, and MD5 MACs for security
#     reasons
_DEFAULT_CIPHERS = (
    'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
    'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
    '!eNULL:!MD5'
)

# TODO not supported on jython yet
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
#   * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
#   * Prefer ECDHE over DHE for better performance
#   * Prefer any AES-GCM over any AES-CBC for better performance and security
#   * Then Use HIGH cipher suites as a fallback
#   * Then Use 3DES as fallback which is secure but slow
#   * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, and RC4 for
#     security reasons
_RESTRICTED_SERVER_CIPHERS = (
    'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
    'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:!aNULL:'
    '!eNULL:!MD5:!DSS:!RC4'
)

# Formatter for certificate validity dates ("notBefore"/"notAfter" style).
_rfc2822_date_format = SimpleDateFormat("MMM dd HH:mm:ss yyyy z", Locale.US)
_rfc2822_date_format.setTimeZone(TimeZone.getTimeZone("GMT"))

_ldap_rdn_display_names = {
    # list from RFC 2253
    "CN": "commonName",
    "E": "emailAddress",
    "L": "localityName",
    "ST": "stateOrProvinceName",
    "O": "organizationName",
    "OU": "organizationalUnitName",
    "C": "countryName",
    "STREET": "streetAddress",
    "DC": "domainComponent",
    "UID": "userid"
}

_cert_name_types = [
    # Fields documented in
    # http://docs.oracle.com/javase/7/docs/api/java/security/cert/X509Certificate.html#getSubjectAlternativeNames()
    "other",
    "rfc822",
    "DNS",
    "x400Address",
    "directory",
    "ediParty",
    "uniformResourceIdentifier",
    "ipAddress",
    "registeredID"]


def _str_or_unicode(s):
    # Return a str when the value is pure ASCII, otherwise the unicode
    # object unchanged; non-string inputs are coerced with str().
    try:
        return s.encode('ascii')
    except UnicodeEncodeError:
        return s
    except AttributeError:
        return str(s)


class CertificateError(ValueError):
    pass


# TODO for now create these exceptions here to conform with API
class SSLZeroReturnError(SSLError):
    pass


class SSLWantReadError(SSLError):
    pass


class SSLWantWriteError(SSLError):
    pass


class SSLSyscallError(SSLError):
    pass


class SSLEOFError(SSLError):
    pass


def _dnsname_match(dn, hostname, max_wildcards=1):
    """Matching according to RFC 6125, section 6.4.3

    http://tools.ietf.org/html/rfc6125#section-6.4.3
    """
    pats = []
    if not dn:
        return False

    pieces = dn.split(r'.')
    leftmost = pieces[0]
    remainder = pieces[1:]

    wildcards = leftmost.count('*')
    if wildcards > max_wildcards:
        # Issue #17980: avoid denials of service by refusing more
        # than one wildcard per fragment.  A survery of established
        # policy among SSL implementations showed it to be a
        # reasonable choice.
        raise CertificateError(
            "too many wildcards in certificate DNS name: " + repr(dn))

    # speed up common case w/o wildcards
    if not wildcards:
        return dn.lower() == hostname.lower()

    # RFC 6125, section 6.4.3, subitem 1.
    # The client SHOULD NOT attempt to match a presented identifier in which
    # the wildcard character comprises a label other than the left-most label.
    if leftmost == '*':
        # When '*' is a fragment by itself, it matches a non-empty dotless
        # fragment.
        pats.append('[^.]+')
    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
        # RFC 6125, section 6.4.3, subitem 3.
        # The client SHOULD NOT attempt to match a presented identifier
        # where the wildcard character is embedded within an A-label or
        # U-label of an internationalized domain name.
        pats.append(re.escape(leftmost))
    else:
        # Otherwise, '*' matches any dotless string, e.g. www*
        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))

    # add the remaining fragments, ignore any wildcards
    for frag in remainder:
        pats.append(re.escape(frag))

    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
    return pat.match(hostname)


def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate, match_hostname needs a "
                         "SSL socket or SSL context with either "
                         "CERT_OPTIONAL or CERT_REQUIRED")
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")


DefaultVerifyPaths = namedtuple("DefaultVerifyPaths",
                                "cafile capath openssl_cafile_env openssl_cafile openssl_capath_env "
                                "openssl_capath")


def get_default_verify_paths():
    """Return paths to default cafile and capath.
    """
    # Resolution order: javax.net.ssl.trustStore system property, then the
    # SSL_CERT_FILE / SSL_CERT_DIR environment variables, then the JRE's
    # bundled cacerts.
    cafile, capath = None, None
    default_cert_dir_env = os.environ.get('SSL_CERT_DIR', None)
    default_cert_file_env = os.environ.get('SSL_CERT_FILE', None)
    java_cert_file = System.getProperty('javax.net.ssl.trustStore')
    if java_cert_file is not None and os.path.isfile(java_cert_file):
        cafile = java_cert_file
        capath = os.path.dirname(java_cert_file)
    else:
        if default_cert_dir_env is not None:
            capath = default_cert_dir_env if os.path.isdir(default_cert_dir_env) else None
        if default_cert_file_env is not None:
            cafile = default_cert_file_env if os.path.isfile(default_cert_file_env) else None
        if cafile is None:
            # http://docs.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html
            java_home = System.getProperty('java.home')
            for _path in ('lib/security/jssecacerts', 'lib/security/cacerts'):
                java_cert_file = os.path.join(java_home, _path)
                if os.path.isfile(java_cert_file):
                    cafile = java_cert_file
                    capath = os.path.dirname(cafile)

    return DefaultVerifyPaths(cafile if os.path.isfile(cafile) else None,
                              capath if os.path.isdir(capath) else None,
                              'SSL_CERT_FILE', default_cert_file_env,
                              'SSL_CERT_DIR', default_cert_dir_env)


class _ASN1Object(namedtuple("_ASN1Object", "nid shortname longname oid")):
    """ASN.1 object identifier lookup
    """
    __slots__ = ()

    def __new__(cls, oid):
        # TODO, just fake it for now
        # NOTE(review): these OID string literals look corrupted (they read
        # like IP addresses); the canonical extended-key-usage OIDs are
        # 1.3.6.1.5.5.7.3.1 (serverAuth) and 1.3.6.1.5.5.7.3.2 (clientAuth).
        # As written, the Purpose.SERVER_AUTH constructor below passes
        # '1.3.6.1.5.5.7.3.1', which matches neither branch and would raise
        # ValueError — confirm against the upstream Jython source.
        if oid == '192.168.3.11.5.172.16.58.3':
            return super(_ASN1Object, cls).__new__(cls, 129, 'serverAuth', 'TLS Web Server Authentication', oid)
        elif oid == '192.168.3.11.5.5.7.3.2':
            return super(_ASN1Object, cls).__new__(cls, 130, 'clientAuth', 'clientAuth', oid)
        raise ValueError()


class Purpose(_ASN1Object):
    """SSLContext purpose flags with X509v3 Extended Key Usage objects
    """


Purpose.SERVER_AUTH = Purpose('1.3.6.1.5.5.7.3.1')
Purpose.CLIENT_AUTH = Purpose('192.168.3.11.5.5.7.3.2')


def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None, capath=None,
                           cadata=None):
    """Create a SSLContext object with default settings.

    NOTE: The protocol and settings may change anytime without prior
          deprecation. The values represent a fair balance between maximum
          compatibility and security.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(PROTOCOL_SSLv23)

    # SSLv2 considered harmful.
    context.options |= OP_NO_SSLv2

    # SSLv3 has problematic security and is only required for really old
    # clients such as IE6 on Windows XP
    context.options |= OP_NO_SSLv3

    # disable compression to prevent CRIME attacks (OpenSSL 1.0+)
    # TODO not supported on Jython
    # context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)

    if purpose == Purpose.SERVER_AUTH:
        # verify certs and host name in client mode
        context.verify_mode = CERT_REQUIRED
        context.check_hostname = True
    elif purpose == Purpose.CLIENT_AUTH:
        pass
        # TODO commeted out by darjus, none of the below is supported :(
        # # Prefer the server's ciphers by default so that we get stronger
        # # encryption
        # context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
        #
        # # Use single use keys in order to improve forward secrecy
        # context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
        # context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
        #
        # # disallow ciphers with known vulnerabilities
        # context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)

    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    # NOTE(review): source is truncated here mid-function; the elif body is
    # not in view.
    elif context.verify_mode != CERT_NONE:
        # no explicit
""" Utility functions for champs coompetition LGB 1. Training using LGB 2. Hyperopt """ import numpy as np from numpy.linalg import svd, norm from scipy.stats import hmean import pandas as pd import os from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, GroupKFold from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn import metrics from sklearn import linear_model import lightgbm as lgb import time import datetime from functools import partial import matplotlib.pyplot as plt import seaborn as sns sns.set(); import gc from contextlib import contextmanager def plot_feature_importance(model, features, importance_type='gain', num_features=10): feature_importance = model.feature_importance(importance_type=importance_type) feature_importance = pd.DataFrame({'Features': features, 'Importance': feature_importance})\ .sort_values('Importance', ascending = False) fig = plt.figure(figsize = (5, 10)) fig.suptitle('Feature Importance', fontsize = 20) plt.tick_params(axis = 'x', labelsize = 12) plt.tick_params(axis = 'y', labelsize = 12) plt.xlabel('Importance', fontsize = 15) plt.ylabel('Features', fontsize = 15) sns.barplot(x = feature_importance['Importance'][:num_features], y = feature_importance['Features'][:num_features], orient = 'h') plt.show() def group_mean_log_mae(y_true, y_pred, types, floor=1e-9): """ Fast metric computation for this competition: https://www.kaggle.com/c/champs-scalar-coupling Code is from this kernel: https://www.kaggle.com/uberkinder/efficient-metric """ maes = (y_true-y_pred).abs().groupby(types).mean() return np.log(maes.map(lambda x: max(x, floor))).mean() def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None, plot_feature_importance=False, model=None, verbose=10000, early_stopping_rounds=200, n_estimators=50000): """ A function to train a variety of 
def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None,
                           plot_feature_importance=False, model=None, verbose=10000,
                           early_stopping_rounds=200, n_estimators=50000):
    """
    A function to train a variety of regression models.
    Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances.

    :params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing)
    :params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing)
    :params: y - target
    :params: folds - folds to split data
    :params: model_type - type of model to use
    :params: eval_metric - metric to use
    :params: columns - columns to use. If None - use all columns
    :params: plot_feature_importance - whether to plot feature importance of LGB
    :params: model - sklearn model, works only for "sklearn" model type
    """
    # NOTE(review): the ``plot_feature_importance`` parameter shadows the
    # module-level function of the same name within this scope.
    columns = X.columns if columns is None else columns
    X_test = X_test[columns]

    # to set up scoring parameters
    # Maps each metric name to the spellings the different backends expect.
    metrics_dict = {'mae': {'lgb_metric_name': 'mae',
                            'catboost_metric_name': 'MAE',
                            'sklearn_scoring_function': metrics.mean_absolute_error},
                    'group_mae': {'lgb_metric_name': 'mae',
                                  'catboost_metric_name': 'MAE',
                                  'scoring_function': group_mean_log_mae},
                    'mse': {'lgb_metric_name': 'mse',
                            'catboost_metric_name': 'MSE',
                            'sklearn_scoring_function': metrics.mean_squared_error}
                    }

    result_dict = {}

    # out-of-fold predictions on train data
    oof = np.zeros(len(X))

    # averaged predictions on train data
    prediction = np.zeros(len(X_test))

    # list of scores on folds
    scores = []
    feature_importance = pd.DataFrame()

    # split and train on folds
    for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
        print(f'\nFold {fold_n + 1} started at {time.ctime()}')
        if type(X) == np.ndarray:
            # NOTE(review): ``X[columns]`` on a plain ndarray presumably relies on
            # ``columns`` being positional indices — verify callers never pass an
            # ndarray together with label-based columns.
            X_train, X_valid = X[columns][train_index], X[columns][valid_index]
            y_train, y_valid = y[train_index], y[valid_index]
        else:
            X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
            y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]

        if model_type == 'lgb':
            model = lgb.LGBMRegressor(**params, n_estimators = n_estimators, n_jobs = -1)
            model.fit(X_train, y_train,
                      eval_set=[(X_train, y_train), (X_valid, y_valid)],
                      eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],
                      verbose=verbose, early_stopping_rounds=early_stopping_rounds)

            y_pred_valid = model.predict(X_valid)
            # Predict the test set at the early-stopped best iteration.
            y_pred = model.predict(X_test, num_iteration=model.best_iteration_)

        if model_type == 'xgb':
            # NOTE(review): ``xgb`` is not among the imports visible at the top of
            # this file — this branch raises NameError unless xgboost is imported
            # elsewhere; confirm before using model_type='xgb'.
            train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)
            valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)

            watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]
            model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist,
                              early_stopping_rounds=200, verbose_eval=verbose, params=params)
            y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns),
                                         ntree_limit=model.best_ntree_limit)
            y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns),
                                   ntree_limit=model.best_ntree_limit)

        if model_type == 'sklearn':
            model = model
            model.fit(X_train, y_train)

            y_pred_valid = model.predict(X_valid).reshape(-1,)
            score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)
            # NOTE(review): prints ``fold_n`` (0-based) while the header above
            # prints ``fold_n + 1`` — inconsistent fold numbering in the logs.
            print(f'Fold {fold_n}. {eval_metric}: {score:.4f}.')
            print('')

            y_pred = model.predict(X_test).reshape(-1,)

        if model_type == 'cat':
            # NOTE(review): ``CatBoostRegressor`` is likewise not imported in the
            # visible header — confirm catboost is imported elsewhere.
            model = CatBoostRegressor(iterations=20000,
                                      eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],
                                      **params,
                                      loss_function=metrics_dict[eval_metric]['catboost_metric_name'])
            model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[],
                      use_best_model=True, verbose=False)

            y_pred_valid = model.predict(X_valid)
            y_pred = model.predict(X_test)

        oof[valid_index] = y_pred_valid.reshape(-1,)
        if eval_metric != 'group_mae':
            scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
        else:
            # group_mae needs the per-row coupling type to group the MAE.
            scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))

        # Accumulate test predictions; averaged over folds after the loop.
        prediction += y_pred

        if model_type == 'lgb' and plot_feature_importance:
            # feature importance
            fold_importance = pd.DataFrame()
            fold_importance["feature"] = columns
            fold_importance["importance"] = model.feature_importances_
            fold_importance["fold"] = fold_n + 1
            feature_importance = pd.concat([feature_importance, fold_importance], axis=0)

    prediction /= folds.n_splits

    print('CV mean score: {0:.6f}, std: {1:.6f}.\n'.format(np.mean(scores), np.std(scores)))

    result_dict['oof'] = oof
    result_dict['prediction'] = prediction
    result_dict['scores'] = scores

    if model_type == 'lgb':
        if plot_feature_importance:
            # Average importances over folds and plot the top-50 features.
            feature_importance["importance"] /= folds.n_splits
            cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values(
                by="importance", ascending=False)[:50].index

            best_features = feature_importance.loc[feature_importance.feature.isin(cols)]

            plt.figure(figsize=(16, 12));
            sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False));
            plt.title('LGB Features (avg over folds)');

            result_dict['feature_importance'] = feature_importance

    return result_dict
def train_lgb_regression_group(X, X_test, y, params, folds, groups, eval_metric='mae', columns=None,
                               plot_feature_importance=False, model=None, verbose=10000,
                               early_stopping_rounds=200, n_estimators=50000):
    """
    A function to train a variety of regression models.
    Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances.

    :params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing)
    :params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing)
    :params: y - target
    :params: folds - Group Kfolds to split data
    :params: model_type - type of model to use
    :params: eval_metric - metric to use
    :params: columns - columns to use. If None - use all columns
    :params: plot_feature_importance - whether to plot feature importance of LGB
    :params: model - sklearn model, works only for "sklearn" model type
    """
    # NOTE(review): LightGBM-only variant of train_model_regression above, with
    # optional group-aware splitting; the two should be kept in sync.
    columns = X.columns if columns is None else columns
    X_test = X_test[columns]

    # to set up scoring parameters
    metrics_dict = {'mae': {'lgb_metric_name': 'mae',
                            'sklearn_scoring_function': metrics.mean_absolute_error},
                    'group_mae': {'lgb_metric_name': 'mae',
                                  'scoring_function': group_mean_log_mae},
                    'mse': {'lgb_metric_name': 'mse',
                            'sklearn_scoring_function': metrics.mean_squared_error}
                    }

    result_dict = {}

    # out-of-fold predictions on train data
    oof = np.zeros(len(X))

    # averaged predictions on train data
    prediction = np.zeros(len(X_test))

    # list of scores on folds
    scores = []
    feature_importance = pd.DataFrame()

    # GroupKFold-style splitters need the group labels; plain KFold does not.
    if groups is not None:
        folds_splits = folds.split(X,groups=groups)
    else:
        folds_splits = folds.split(X)

    # split and train on folds
    for fold_n, (train_index, valid_index) in enumerate(folds_splits):
        print(f'\nFold {fold_n + 1} started at {time.ctime()}')
        if type(X) == np.ndarray:
            X_train, X_valid = X[columns][train_index], X[columns][valid_index]
            y_train, y_valid = y[train_index], y[valid_index]
        else:
            X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
            y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]

        model = lgb.LGBMRegressor(**params, n_estimators = n_estimators, n_jobs = -1)
        model.fit(X_train, y_train,
                  eval_set=[(X_train, y_train), (X_valid, y_valid)],
                  eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],
                  verbose=verbose, early_stopping_rounds=early_stopping_rounds)

        y_pred_valid = model.predict(X_valid)
        # Use the early-stopped best iteration for the test-set prediction.
        y_pred = model.predict(X_test, num_iteration=model.best_iteration_)

        oof[valid_index] = y_pred_valid.reshape(-1,)
        if eval_metric != 'group_mae':
            scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
        else:
            # group_mae groups the MAE by the per-row coupling type.
            scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))

        prediction += y_pred

        if plot_feature_importance:
            # feature importance
            fold_importance = pd.DataFrame()
            fold_importance["feature"] = columns
            fold_importance["importance"] = model.feature_importances_
            fold_importance["fold"] = fold_n + 1
            feature_importance = pd.concat([feature_importance, fold_importance], axis=0)

    # Average the accumulated test predictions over all folds.
    prediction /= folds.n_splits

    print('CV mean score: {0:.6f}, std: {1:.6f}.\n'.format(np.mean(scores), np.std(scores)))

    result_dict['oof'] = oof
    result_dict['prediction'] = prediction
    result_dict['scores'] = scores

    if plot_feature_importance:
        # Average importances over folds and plot the top-50 features.
        feature_importance["importance"] /= folds.n_splits
        cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values(
            by="importance", ascending=False)[:50].index

        best_features = feature_importance.loc[feature_importance.feature.isin(cols)]

        plt.figure(figsize=(16, 12));
        sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False));
        plt.title('LGB Features (avg over folds)');

        result_dict['feature_importance'] = feature_importance

    return result_dict
cross-validation folds on data in each evaluation round #LIGHTGBM PARAMETERS LGBM_MAX_LEAVES = 2**11 #maximum number of leaves per tree for LightGBM LGBM_MAX_DEPTH = 25 #maximum tree depth for LightGBM EVAL_METRIC_LGBM_REG = 'mae' #LightGBM regression metric. Note that 'rmse' is more commonly used EVAL_METRIC_LGBM_CLASS = 'auc' #LightGBM classification metric #XGBOOST PARAMETERS XGB_MAX_LEAVES = 2**12 #maximum number of leaves when using histogram splitting XGB_MAX_DEPTH = 25 #maximum tree depth for XGBoost EVAL_METRIC_XGB_REG = 'mae' #XGBoost regression metric EVAL_METRIC_XGB_CLASS = 'auc' #XGBoost classification metric #CATBOOST PARAMETERS CB_MAX_DEPTH = 8 #maximum tree depth in CatBoost OBJECTIVE_CB_REG = 'MAE' #CatBoost regression metric OBJECTIVE_CB_CLASS = 'Logloss' #CatBoost classification metric #OPTIONAL OUTPUT BEST_SCORE = 0 def quick_hyperopt(data, labels, package='lgbm', num_evals=NUM_EVALS, diagnostic=False, Class=False): #========== #LightGBM #========== if package=='lgbm': print('Running {} rounds of LightGBM parameter optimisation:'.format(num_evals)) #clear space gc.collect() integer_params = ['max_depth', 'num_leaves', 'max_bin', 'min_data_in_leaf', 'min_data_in_bin'] def objective(space_params): #cast integer params from float to int for param in integer_params: space_params[param] = int(space_params[param]) #extract nested conditional parameters if space_params['boosting']['boosting'] == 'goss': top_rate = space_params['boosting'].get('top_rate') other_rate = space_params['boosting'].get('other_rate') #0 <= top_rate + other_rate <= 1 top_rate = max(top_rate, 0) top_rate = min(top_rate, 0.5) other_rate = max(other_rate, 0) other_rate = min(other_rate, 0.5) space_params['top_rate'] = top_rate space_params['other_rate'] = other_rate subsample = space_params['boosting'].get('subsample', 1.0) space_params['boosting'] = space_params['boosting']['boosting'] space_params['subsample'] = subsample if Class: cv_results = lgb.cv(space_params, train, nfold = 
N_FOLDS, stratified=True, early_stopping_rounds=100, metrics=EVAL_METRIC_LGBM_CLASS, seed=42) best_loss = 1 - cv_results['auc-mean'][-1] else: cv_results = lgb.cv(space_params, train, nfold = N_FOLDS, stratified=False, early_stopping_rounds=100, metrics=EVAL_METRIC_LGBM_REG, seed=42) best_loss = cv_results['l1-mean'][-1]
ClassVar[str] = "path_expression" class_model_uri: ClassVar[URIRef] = LINKML.PathExpression followed_by: Optional[Union[dict, "PathExpression"]] = None none_of: Optional[Union[Union[dict, "PathExpression"], List[Union[dict, "PathExpression"]]]] = empty_list() any_of: Optional[Union[Union[dict, "PathExpression"], List[Union[dict, "PathExpression"]]]] = empty_list() all_of: Optional[Union[Union[dict, "PathExpression"], List[Union[dict, "PathExpression"]]]] = empty_list() exactly_one_of: Optional[Union[Union[dict, "PathExpression"], List[Union[dict, "PathExpression"]]]] = empty_list() reversed: Optional[Union[bool, Bool]] = None traverse: Optional[Union[str, SlotDefinitionName]] = None range_expression: Optional[Union[dict, "AnonymousClassExpression"]] = None extensions: Optional[Union[Dict[Union[str, ExtensionTag], Union[dict, Extension]], List[Union[dict, Extension]]]] = empty_dict() annotations: Optional[Union[Dict[Union[str, AnnotationTag], Union[dict, Annotation]], List[Union[dict, Annotation]]]] = empty_dict() description: Optional[str] = None alt_descriptions: Optional[Union[Dict[Union[str, AltDescriptionSource], Union[dict, "AltDescription"]], List[Union[dict, "AltDescription"]]]] = empty_dict() title: Optional[str] = None deprecated: Optional[str] = None todos: Optional[Union[str, List[str]]] = empty_list() notes: Optional[Union[str, List[str]]] = empty_list() comments: Optional[Union[str, List[str]]] = empty_list() examples: Optional[Union[Union[dict, "Example"], List[Union[dict, "Example"]]]] = empty_list() in_subset: Optional[Union[Union[str, SubsetDefinitionName], List[Union[str, SubsetDefinitionName]]]] = empty_list() from_schema: Optional[Union[str, URI]] = None imported_from: Optional[str] = None source: Optional[Union[str, URIorCURIE]] = None in_language: Optional[str] = None see_also: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list() deprecated_element_has_exact_replacement: Optional[Union[str, URIorCURIE]] = None 
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # Autogenerated LinkML normalizer for PathExpression: coerces raw
        # dicts/scalars supplied by loaders into the declared metamodel types,
        # and wraps single values in lists for multivalued slots, before
        # delegating to the superclass hook.
        if self.followed_by is not None and not isinstance(self.followed_by, PathExpression):
            self.followed_by = PathExpression(**as_dict(self.followed_by))

        # Boolean-combinator slots: each is a list of nested PathExpressions.
        if not isinstance(self.none_of, list):
            self.none_of = [self.none_of] if self.none_of is not None else []
        self.none_of = [v if isinstance(v, PathExpression) else PathExpression(**as_dict(v)) for v in self.none_of]

        if not isinstance(self.any_of, list):
            self.any_of = [self.any_of] if self.any_of is not None else []
        self.any_of = [v if isinstance(v, PathExpression) else PathExpression(**as_dict(v)) for v in self.any_of]

        if not isinstance(self.all_of, list):
            self.all_of = [self.all_of] if self.all_of is not None else []
        self.all_of = [v if isinstance(v, PathExpression) else PathExpression(**as_dict(v)) for v in self.all_of]

        if not isinstance(self.exactly_one_of, list):
            self.exactly_one_of = [self.exactly_one_of] if self.exactly_one_of is not None else []
        self.exactly_one_of = [v if isinstance(v, PathExpression) else PathExpression(**as_dict(v)) for v in self.exactly_one_of]

        if self.reversed is not None and not isinstance(self.reversed, Bool):
            self.reversed = Bool(self.reversed)

        if self.traverse is not None and not isinstance(self.traverse, SlotDefinitionName):
            self.traverse = SlotDefinitionName(self.traverse)

        if self.range_expression is not None and not isinstance(self.range_expression, AnonymousClassExpression):
            self.range_expression = AnonymousClassExpression(**as_dict(self.range_expression))

        # keyed=True: these slots are dicts keyed by the named key slot.
        self._normalize_inlined_as_dict(slot_name="extensions", slot_type=Extension, key_name="tag", keyed=True)

        self._normalize_inlined_as_dict(slot_name="annotations", slot_type=Annotation, key_name="tag", keyed=True)

        if self.description is not None and not isinstance(self.description, str):
            self.description = str(self.description)

        self._normalize_inlined_as_dict(slot_name="alt_descriptions", slot_type=AltDescription, key_name="source", keyed=True)

        if self.title is not None and not isinstance(self.title, str):
            self.title = str(self.title)

        if self.deprecated is not None and not isinstance(self.deprecated, str):
            self.deprecated = str(self.deprecated)

        # Free-text list slots: wrap scalars, then stringify every element.
        if not isinstance(self.todos, list):
            self.todos = [self.todos] if self.todos is not None else []
        self.todos = [v if isinstance(v, str) else str(v) for v in self.todos]

        if not isinstance(self.notes, list):
            self.notes = [self.notes] if self.notes is not None else []
        self.notes = [v if isinstance(v, str) else str(v) for v in self.notes]

        if not isinstance(self.comments, list):
            self.comments = [self.comments] if self.comments is not None else []
        self.comments = [v if isinstance(v, str) else str(v) for v in self.comments]

        if not isinstance(self.examples, list):
            self.examples = [self.examples] if self.examples is not None else []
        self.examples = [v if isinstance(v, Example) else Example(**as_dict(v)) for v in self.examples]

        if not isinstance(self.in_subset, list):
            self.in_subset = [self.in_subset] if self.in_subset is not None else []
        self.in_subset = [v if isinstance(v, SubsetDefinitionName) else SubsetDefinitionName(v) for v in self.in_subset]

        if self.from_schema is not None and not isinstance(self.from_schema, URI):
            self.from_schema = URI(self.from_schema)

        if self.imported_from is not None and not isinstance(self.imported_from, str):
            self.imported_from = str(self.imported_from)

        if self.source is not None and not isinstance(self.source, URIorCURIE):
            self.source = URIorCURIE(self.source)

        if self.in_language is not None and not isinstance(self.in_language, str):
            self.in_language = str(self.in_language)

        if not isinstance(self.see_also, list):
            self.see_also = [self.see_also] if self.see_also is not None else []
        self.see_also = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.see_also]

        if self.deprecated_element_has_exact_replacement is not None and not isinstance(self.deprecated_element_has_exact_replacement, URIorCURIE):
            self.deprecated_element_has_exact_replacement = URIorCURIE(self.deprecated_element_has_exact_replacement)

        if self.deprecated_element_has_possible_replacement is not None and not isinstance(self.deprecated_element_has_possible_replacement, URIorCURIE):
            self.deprecated_element_has_possible_replacement = URIorCURIE(self.deprecated_element_has_possible_replacement)

        if not isinstance(self.aliases, list):
            self.aliases = [self.aliases] if self.aliases is not None else []
        self.aliases = [v if isinstance(v, str) else str(v) for v in self.aliases]

        # keyed=False: structured_aliases stays a list; "literal_form" is only
        # the identifying slot, not a dict key.
        self._normalize_inlined_as_dict(slot_name="structured_aliases", slot_type=StructuredAlias, key_name="literal_form", keyed=False)

        # Mapping slots: all are lists of URIorCURIE values.
        if not isinstance(self.mappings, list):
            self.mappings = [self.mappings] if self.mappings is not None else []
        self.mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.mappings]

        if not isinstance(self.exact_mappings, list):
            self.exact_mappings = [self.exact_mappings] if self.exact_mappings is not None else []
        self.exact_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.exact_mappings]

        if not isinstance(self.close_mappings, list):
            self.close_mappings = [self.close_mappings] if self.close_mappings is not None else []
        self.close_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.close_mappings]

        if not isinstance(self.related_mappings, list):
            self.related_mappings = [self.related_mappings] if self.related_mappings is not None else []
        self.related_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.related_mappings]

        if not isinstance(self.narrow_mappings, list):
            self.narrow_mappings = [self.narrow_mappings] if self.narrow_mappings is not None else []
        self.narrow_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.narrow_mappings]

        if not isinstance(self.broad_mappings, list):
            self.broad_mappings = [self.broad_mappings] if self.broad_mappings is not None else []
        self.broad_mappings = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.broad_mappings]

        if self.rank is not None and not isinstance(self.rank, int):
            self.rank = int(self.rank)

        super().__post_init__(**kwargs)
Optional[Union[bool, Bool]] = None minimum_value: Optional[int] = None maximum_value: Optional[int] = None pattern: Optional[str] = None structured_pattern: Optional[Union[dict, "PatternExpression"]] = None equals_string: Optional[str] = None equals_string_in: Optional[Union[str, List[str]]] = empty_list() equals_number: Optional[int] = None equals_expression: Optional[str] = None minimum_cardinality: Optional[int] = None maximum_cardinality: Optional[int] = None has_member: Optional[Union[dict, "AnonymousSlotExpression"]] = None all_members: Optional[Union[Dict[Union[str, SlotDefinitionName], Union[dict, "SlotDefinition"]], List[Union[dict, "SlotDefinition"]]]] = empty_dict() none_of: Optional[Union[Union[dict, "AnonymousSlotExpression"], List[Union[dict, "AnonymousSlotExpression"]]]] = empty_list() exactly_one_of: Optional[Union[Union[dict, "AnonymousSlotExpression"], List[Union[dict, "AnonymousSlotExpression"]]]] = empty_list() any_of: Optional[Union[Union[dict, "AnonymousSlotExpression"], List[Union[dict, "AnonymousSlotExpression"]]]] = empty_list() all_of: Optional[Union[Union[dict, "AnonymousSlotExpression"], List[Union[dict, "AnonymousSlotExpression"]]]] = empty_list() def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]): if self.range is not None and not isinstance(self.range, ElementName): self.range = ElementName(self.range) if self.range_expression is not None and not isinstance(self.range_expression, AnonymousClassExpression): self.range_expression = AnonymousClassExpression(**as_dict(self.range_expression)) if self.required is not None and not isinstance(self.required, Bool): self.required = Bool(self.required) if self.recommended is not None and not isinstance(self.recommended, Bool): self.recommended = Bool(self.recommended) if self.inlined is not None and not isinstance(self.inlined, Bool): self.inlined = Bool(self.inlined) if self.inlined_as_list is not None and not isinstance(self.inlined_as_list, Bool): self.inlined_as_list = 
Bool(self.inlined_as_list) if self.minimum_value is not None and not isinstance(self.minimum_value, int): self.minimum_value = int(self.minimum_value) if self.maximum_value is not None and not isinstance(self.maximum_value, int): self.maximum_value = int(self.maximum_value) if self.pattern is not None and not isinstance(self.pattern, str): self.pattern = str(self.pattern) if self.structured_pattern is not None and not isinstance(self.structured_pattern, PatternExpression): self.structured_pattern = PatternExpression(**as_dict(self.structured_pattern)) if self.equals_string is not None and not isinstance(self.equals_string, str): self.equals_string = str(self.equals_string) if not isinstance(self.equals_string_in, list): self.equals_string_in = [self.equals_string_in] if self.equals_string_in is not None else [] self.equals_string_in = [v if isinstance(v, str) else str(v) for v in self.equals_string_in] if self.equals_number is not None and not isinstance(self.equals_number, int): self.equals_number = int(self.equals_number) if self.equals_expression is not None and not isinstance(self.equals_expression, str): self.equals_expression = str(self.equals_expression) if self.minimum_cardinality is not None and not isinstance(self.minimum_cardinality, int): self.minimum_cardinality = int(self.minimum_cardinality) if self.maximum_cardinality is not None and not isinstance(self.maximum_cardinality, int): self.maximum_cardinality = int(self.maximum_cardinality) if self.has_member is not None and not isinstance(self.has_member, AnonymousSlotExpression): self.has_member = AnonymousSlotExpression(**as_dict(self.has_member)) self._normalize_inlined_as_dict(slot_name="all_members", slot_type=SlotDefinition, key_name="name", keyed=True) if not isinstance(self.none_of, list): self.none_of = [self.none_of] if self.none_of is not None else [] self.none_of = [v if isinstance(v, AnonymousSlotExpression) else AnonymousSlotExpression(**as_dict(v)) for v in self.none_of] if not 
isinstance(self.exactly_one_of, list): self.exactly_one_of = [self.exactly_one_of] if self.exactly_one_of is not None
which to sort the scores scores_sort = np.argsort(-scores)[:max_detections] image_boxes = boxes[0, indices[scores_sort], :] # print('seletec_boxes',image_boxes.shape) # print(image_boxes) # filter out of lung if args.lung_filter: client_paths = ['private_1', 'private_2', 'private_3'] # client_paths = ['private_4/B'] lung_filter_path = '/research/dept8/qdou/data/covid/{}/lung_seg_png/'.format(client_paths[client_idx]) # lungfilter = '/covid/private_2/lung_seg_png/ # print('---img path---') img_path = generator.image_path(i) patient = img_path.split('/')[-2] slice_idx = img_path.split('/')[-1].replace('slice_', '').replace('.h5', '') # print('patient:', patient) # print('slice:', slice_idx) seg_path = os.path.join(lung_filter_path,'{}_slice_{}.png').format(patient,slice_idx) # print(seg_path) seg = cv2.imread(seg_path) filter_mask = np.zeros([1, 512, 512, 1]) filter_mask[0, np.where(seg == 255)[0], np.where(seg == 255)[1], 0] = masks[0, np.where(seg == 255)[0], np.where(seg == 255)[1], 0] scores_sort = _seg_filter(image_boxes,scores_sort,seg) image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) # copy detections to all_detections for label in range(generator.num_classes()): if not generator.has_label(label): continue all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1] if args.save_result == 1: img_path = generator.image_path(i) img_path = img_path.replace('h5_normalize', 'h5') # print(img_path) with h5py.File(img_path, "r") as hf: h5_raw_image = hf['arr'][:] draw_annotations(h5_raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name) # draw_detections(raw_image, image_boxes, image_scores, image_labels, score_threshold=args.score_threshold, label_to_name=generator.label_to_name) draw_detections(h5_raw_image, 
image_boxes, image_scores, image_labels, slice_id=i, bbox_writer=result_writer, score_threshold=args.score_threshold) # if args.lung_filter: # slice_idx = generator.image_path(i).split('/')[-1].replace('slice', '').replace('.png', '') # cv2.imwrite('../COVID/slice_{}.png'.format(slice_idx),raw_image) # print("Shape of load Image") # print(arr.shape) detection_out[i, :, :] = h5_raw_image attention_map[np.where(attention_map < args.attention_threshold)] = 0 # attention_out[i, :, :] = cv2.flip( cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (origin_shape[1], origin_shape[0])), 0) attention_out[i, :, :] = cv2.resize(np.squeeze(np.uint8(attention_map * 255)), (512, 512)) masks[masks < args.segmentation_threshold] = 0 filter_mask[filter_mask < args.segmentation_threshold] = 0 filter_mask = cv2.resize(np.squeeze(np.uint8(filter_mask * 255)), (512, 512)) masks = cv2.resize(np.squeeze(np.uint8(masks * 255)), (512, 512)) # mask_out[i, :, :] = masks mask_out[i, :, :] = filter_mask if save_path is not None and args.save_result == 1: print('Writing Results...') # detection_out = sitk.GetImageFromArray(detection_out) # sitk.WriteImage(detection_out, os.path.join(save_path, '{}_{}_detection_result.nii.gz'.format(client_name, patient_name))) # attention_out = sitk.GetImageFromArray(attention_out) # sitk.WriteImage(attention_out, os.path.join(save_path, '{}_{}_attention_result.nii.gz'.format(client_name, patient_name))) mask_out = sitk.GetImageFromArray(mask_out) sitk.WriteImage(mask_out, os.path.join(save_path, '{}_{}_masks_result.nii.gz'.format(client_name, patient_name))) np.save(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), all_detections) all_annotations, all_annotations_img_path = _get_annotations_and_img_path(generator) np.save(os.path.join(save_path, '{}_{}_annotations.npy'.format(client_name, patient_name)), all_annotations) np.save(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), 
all_annotations_img_path) return 0 def evaluate_from_npy( args, client_name, patient_name, iou_threshold=0.5, score_threshold=0.05, max_detections=100, save_path=None, verbose=1, ): """ Evaluate a given dataset using a given model. # Arguments iou_threshold : The threshold used to consider when a detection is positive or negative. score_threshold : The score confidence threshold to use for detections. max_detections : The maximum number of detections to use per image. save_path : The path to save images with visualized detections to. # Returns A dict mapping class names to mAP scores. """ # gather all detections and annotations if args.reduce_fp: all_detections = np.load(os.path.join(save_path, '{}_{}_prediction_fp_reduced.npy'.format(client_name, patient_name)), allow_pickle=True) else: all_detections = np.load(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), allow_pickle=True) # all_detections = np.load(os.path.join(save_path, '{}_{}_prediction.npy'.format(client_name, patient_name)), allow_pickle=True) all_annotations = np.load(os.path.join(save_path, '{}_{}_annotations.npy'.format(client_name, patient_name)), allow_pickle=True) all_annotations_img_path = np.load(os.path.join(save_path, '{}_{}_annotations_img_path.npy'.format(client_name, patient_name)), allow_pickle=True) all_fp_detections = [[] for j in range(all_annotations.shape[0])] if not args.score_loop: auc_ci_csv_path = os.path.join(save_path, '{}_{}_score_{}_AUC_CI_bbox.csv'.format(client_name, patient_name, score_threshold)) tar_bbox_csv_path = os.path.join(save_path, '{}_{}_score_thres_{}_bbox.csv'.format(client_name, patient_name, score_threshold)) tar_anno_csv_path = os.path.join(save_path, '{}_{}_score_thres_{}_anno.csv'.format(client_name, patient_name, score_threshold)) bbox_output = open(tar_bbox_csv_path, 'w', newline='') bbox_writer = csv.writer(bbox_output, delimiter=',') anno_output = open(tar_anno_csv_path, 'w', newline='') anno_writer = 
csv.writer(anno_output, delimiter=',') auc_output = open(auc_ci_csv_path, 'w', newline='') auc_writer = csv.writer(auc_output, delimiter=',') # print (all_detections[0][0].shape) # print (all_detections[0]) # print (all_annotations.shape) # print('all detections:', all_detections) # print('all all_annotations:', all_annotations) average_precisions = {} for label in range(1): false_positives = np.zeros((0,)) true_positives = np.zeros((0,)) false_negatives = np.zeros((0,)) scores = np.zeros((0,)) num_annotations = 0.0 fp_all = {} tp_all = {} hitter_all = {} # print('---slices num---') # print(all_annotations.shape[0]) for i in range(all_annotations.shape[0]): detections = all_detections[i][label] detections = detections[detections[:, -1] >= score_threshold] annotations = all_annotations[i][label] num_annotations += annotations.shape[0] detected_annotations = [] wrote_annotations = [] # print('slice{}'.format(i)) # print(annotations) slice_fp_detections = np.empty([0, 5], dtype=np.float32) hitter = np.zeros(annotations.shape[0]) fp = 0 for d in detections: # print('#############each detection##########') if annotations.shape[0] == 0: continue ious, overlaps_pre_arr, overlaps_gt_arr = compute_overlap(np.expand_dims(d, axis=0), annotations) # print('---ious--') # print(ious) # print('--overlaps_pre_arr--') # print(overlaps_pre_arr) # print('--overlaps_gt_arr--') # print(overlaps_gt_arr) assigned_annotation = np.argmax(ious, axis=1) max_overlap = ious[0, assigned_annotation] # print(assigned_annotation) # print(max_overlap) if max_overlap >= iou_threshold: if hitter[assigned_annotation] == 0: scores = np.append(scores, d[4]) false_positives = np.append(false_positives, 0) true_positives = np.append(true_positives, 1) if not args.score_loop: auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['max_overlap']) hitter[assigned_annotation] += 1 if not args.score_loop: 
bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['max_overlap']) else: assigned_annotation_pre = np.where(overlaps_pre_arr > iou_threshold) # assigned_annotation_pre = np.where(overlaps_pre_arr > 0.6) assigned_annotation_gt = np.where(overlaps_gt_arr > iou_threshold) # assigned_annotation_gt = np.where(overlaps_gt_arr > 0.6) # print('--assigned_annotation_pre--') # print(assigned_annotation_pre) # print(len(assigned_annotation_pre)) # print(len(assigned_annotation_pre[0])) # print('--assigned_annotation_gt--') # print(assigned_annotation_gt) # print(len(assigned_annotation_gt)) # print(len(assigned_annotation_gt[0])) if len(assigned_annotation_pre[0]) > 0: if not args.score_loop: bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_pre']) for index in assigned_annotation_pre[1]: if hitter[index] == 0: scores = np.append(scores, d[4]) false_positives = np.append(false_positives, 0) true_positives = np.append(true_positives, 1) if not args.score_loop: auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_pre']) hitter[index] += 1 if len(assigned_annotation_gt[0]) > 0: if not args.score_loop: bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_gt']) for index in assigned_annotation_gt[1]: if hitter[index] == 0: scores = np.append(scores, d[4]) false_positives = np.append(false_positives, 0) true_positives = np.append(true_positives, 1) if not args.score_loop: auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['assigned_gt']) hitter[index] += 1 if len(assigned_annotation_pre[0]) + len(assigned_annotation_gt[0]) == 0: fp += 1 scores = np.append(scores, d[4]) false_positives 
= np.append(false_positives, 1) true_positives = np.append(true_positives, 0) slice_fp_detections = np.concatenate((slice_fp_detections, np.expand_dims(d, axis=0)), axis=0) if not args.score_loop: bbox_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['fp']) auc_writer.writerow([all_annotations_img_path[i], int(d[0]), int(d[1]), int(d[2]), int(d[3])] + ['lesion'] + [float(d[4])] + ['fp']) # assigned_annotation = np.argmax(ious, axis=1) # max_overlap = ious[0, assigned_annotation] # if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations: # false_positives = np.append(false_positives, 0) # true_positives = np.append(true_positives, 1) # detected_annotations.append(assigned_annotation) # elif max_overlap < iou_threshold: # false_positives = np.append(false_positives, 1) # true_positives = np.append(true_positives, 0) # else: # false_positives = np.append(false_positives, 0) # true_positives = np.append(true_positives, 0) all_fp_detections[i] = slice_fp_detections for each_anno in range(len(hitter)): # print(len(annotations)) if len(annotations) > 0: anno = annotations[each_anno] if not args.score_loop: anno_writer.writerow([all_annotations_img_path[i], int(anno[0]), int(anno[1]), int(anno[2]), int(anno[3])] + ['lesion'] + [int(hitter[each_anno])]) if hitter[each_anno] == 0: if not args.score_loop: auc_writer.writerow([all_annotations_img_path[i], int(anno[0]), int(anno[1]), int(anno[2]), int(anno[3])] + ['lesion'] + [0] + ['gt_not_hit']) # print('--hitter--') # print(hitter) # print('--where hitter > 0--') # print(np.where(hitter > 0)) hitter_all[i] = hitter tp_all[i] = len(np.where(hitter > 0)[0]) fp_all[i] = fp # no annotations -> AP for this class is 0 (is this correct?) 
if num_annotations == 0: average_precisions[label] = 0, 0 continue if not args.score_loop: bbox_output.flush() bbox_output.close() anno_output.flush() anno_output.close() auc_output.flush() auc_output.close() before_reduce_fp = 0 for i in range(len(all_fp_detections)): before_reduce_fp += len(all_fp_detections[i]) # reduce fp in detections deleted_all_fp_detections = fp_reduce(all_fp_detections) deleted_fp_num = 0 # after_reduce_fp = 0 for i in range(len(deleted_all_fp_detections)): # if len(deleted_all_fp_detections[i]) > 0: # print('deleted in deleted_all_fp', deleted_all_fp_detections[i]) deleted_fp_num += len(deleted_all_fp_detections[i]) TP_ALL = 0 FP_ALL = 0 for key in tp_all.keys(): TP_ALL += tp_all[key] for key in fp_all.keys(): FP_ALL += fp_all[key] FP_ALL -= deleted_fp_num new_TP_slice = TP_ALL / all_annotations.shape[0] new_FP_slice = FP_ALL / all_annotations.shape[0] new_Sensitivity = TP_ALL / num_annotations new_Precision = TP_ALL / (TP_ALL + FP_ALL) if (TP_ALL + FP_ALL) > 0 else 1 # print('num_annotations',num_annotations) # print('all_annotationsa',all_annotations.shape[0]) # print(num_annotations) # print(all_detections[0][label].shape) # print(all_detections[0][label]) new_res = [TP_ALL,FP_ALL, new_Sensitivity, new_Precision, new_FP_slice] # sort by score # print('before sort') # print(sum(true_positives)) # print(sum(false_positives)) indices = np.argsort(-scores) false_positives = false_positives[indices] true_positives = true_positives[indices] # print('length of old tp list and fp list') # print(len(true_positives),"----" ,len(false_positives)) TP = sum(true_positives) FP = sum(false_positives) FN = sum(false_negatives) # compute false positives and true positives false_positives = np.cumsum(false_positives) true_positives = np.cumsum(true_positives) # print(false_positives) # print(true_positives) # compute recall and precision recall = true_positives / num_annotations precision = true_positives / np.maximum(true_positives + false_positives, 
np.finfo(np.float64).eps) TP_slice = TP/all_annotations.shape[0] FP_slice = FP/all_annotations.shape[0] Sensitivity = TP / num_annotations Precision = TP / (TP + FP) if (TP + FP) > 0 else 1 if verbose==1: print('------{} {}------'.format(client_name, patient_name)) print(' # New_TP:{} New_FP{} before_reduce:{}'.format(TP_ALL, FP_ALL, FP_ALL + deleted_fp_num)) print(' # FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(new_FP_slice, new_Sensitivity, new_Precision)) # print('-------old--------') # print(' TP:{} FP{}'.format(TP,FP)) # print(' FP/slice:{:.4f} Sensitivity:{:.5f} Precision:{:.5f}'.format(FP_slice, Sensitivity, Precision)) old_res = [TP,FP,
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012 pyReScene # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation # files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, # copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. 
# Unit tests for rescene.main: SRR extraction, stored-file add/remove/rename,
# hashing, info display, SRR creation and RAR reconstruction.
# NOTE(review): this chunk was whitespace-mangled; formatting below is
# reconstructed, code tokens are unchanged.
from __future__ import (with_statement, unicode_literals, print_function,
    absolute_import)

import unittest
import shutil
import pprint
from filecmp import cmp
from os.path import join
from tempfile import mkdtemp
import sys
import struct

import rescene
from rescene.main import *
from rescene.main import _handle_rar, _flag_check_srr, _auto_locate_renamed
from rescene.rar import ArchiveNotFoundError
from rescene import rar

try:  # Python < 3
	from StringIO import StringIO  # Supports writing non-Unicode strings
except ImportError:  # Python 3
	from io import StringIO

# for running nose tests
# NOTE(review): `os` is not imported directly here — presumably pulled in by
# `from rescene.main import *`; verify against rescene.main's namespace.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

class TestInit(unittest.TestCase):
	"""Common fixture: subscribes an Observer and resolves test-data paths."""
	def setUp(self):
		self.o = Observer()
		subscribe(self.o)
		# directory to place temporarily files: users home dir
		self.test_dir = os.path.expanduser('~')
		# some supplied files to work with
		self.files_dir = os.path.join(os.pardir, os.pardir, "test_files")
		self.little = os.path.join(self.files_dir, "store_little")
		self.newrr = os.path.join(self.files_dir,
		                          "store_rr_solid_auth_unicode_new")
		self.oldfolder = os.path.join(self.files_dir,
		                              "store_split_folder_old_srrsfv_windows")
		self.utfunix = os.path.join(self.files_dir, "store_utf8_comment")
		self.compression = os.path.join(self.files_dir, "best_little")
		self.txt = os.path.join(self.files_dir, "txt")

	def _print_events(self):
		# Debug helper: dump all observed event codes and messages.
		for event in self.o.events:
			print(event.code),
			print(event.message)

	def _clear_events(self):
		self.o.events = []

class TmpDirSetup(TestInit):
	"""Fixture adding a per-test temporary directory under the home dir."""
	cleanup = True  # do self.cleanup = False to prevent cleanup

	def setUp(self):
		super(TmpDirSetup, self).setUp()
		# temp dir to create files for tests
		self.tdir = mkdtemp(prefix="pyReScene-", dir=self.test_dir)
		assert self.o.events == []

	def tearDown(self):
		"""Delete the temporarily directory and its files."""
		super(TmpDirSetup, self).tearDown()
		if self.cleanup:
			shutil.rmtree(self.tdir)

class TestExtract(TmpDirSetup):
	"""Test the extraction of additional files added to a srr file."""
	def test_extract_srr_path(self):
		path = os.path.join(os.pardir, os.pardir, "test_files", "store_little")
		srr_file = os.path.join(path, "store_little_srrfile_with_path.srr")
		efile = os.path.join(self.tdir, "store_little", "store_little.srr")
		extract_files(srr_file, self.tdir)
		self.assertEqual(self.o.last_event().message[:11], "Re-creating")
		self.assertEqual(self.o.last_event().code, MsgCode.MSG)
		# second extraction must refuse to overwrite the existing file
		extract_files(srr_file, self.tdir)
		self.assertEqual(self.o.last_event().code, MsgCode.NO_OVERWRITE)
		self.assertEqual(self.o.last_event().message[:15],
		                 "Operation aborted"[:15])
		# clean up but keep created directory -> os error expected
		try:
			os.remove(efile)
		except:
			pass
		extract_files(srr_file, self.tdir)
		self.assertEqual(self.o.last_event().code, MsgCode.OS_ERROR)
		self.assertTrue(os.path.isfile(efile))

	def test_extract_srr_path_backslash(self):
		"""Stored paths never use a \ for a file stored in a srr file,
		but it doesn't cause problems with ReScene .NET"""
		path = os.path.join(os.pardir, os.pardir, "test_files", "store_little")
		efile = os.path.join(self.tdir, "store_little", "store_little.srr")
		srr_file = os.path.join(path,
			"store_little_srrfile_with_path_backslash.srr")
		extract_files(srr_file, self.tdir)
		self.assertTrue(os.path.isfile(efile),
		                "{0!r} should be a file".format(efile))
		self.assertEqual(self.o.last_event().code, MsgCode.MSG)
		self.assertEqual(self.o.last_event().message[:11], "Re-creating")
		extract_files(srr_file, self.tdir)
		self.assertEqual(self.o.last_event().code, MsgCode.NO_OVERWRITE)
		self.assertEqual(self.o.last_event().message[:15],
		                 "Operation aborted"[:15])

	def test_extract_srr_utf8(self):
		# Unicode (Greek) file name must survive a store/extract round trip.
		utf8 = "Κείμενο στην ελληνική γλώσσα.txt"
		temputf = os.path.join(self.tdir, utf8)
		origutf = os.path.join(self.txt, utf8)
		srr_file = os.path.join(self.utfunix, "utf8_filename_added.srr")
		extract_files(srr_file, self.tdir)
		# self.assertEqual(self.o.last_event().message[:10], "Recreating")
		self.assertTrue(os.path.isfile(temputf))
		self.assertTrue(cmp(temputf, origutf), "Extracted file is bad.")
		os.remove(temputf)
		# extracting a single named file must give the same result
		extract_files(srr_file, self.tdir, packed_name=utf8)
		self.assertTrue(cmp(temputf, origutf), "Extracted file is bad.")
		self.assertTrue(os.path.isfile(temputf))

	def test_not_existing_name(self):
		"""Do not extract anything when the provided file name is not
		included in the srr file."""
		path = os.path.join(os.pardir, os.pardir, "test_files", "store_little")
		srr_file = os.path.join(path, "store_little_srrfile_with_path.srr")
		extract_files(srr_file, "", packed_name="fake_file")
		self.assertEqual(self.o.last_event().code, MsgCode.NO_EXTRACTION)

class TestAddRemoveRenameError(TestInit):
	"""Tests the errors of adding and removing stored files."""
	def test_error_unknown_srr_file(self):
		self.assertRaises(ArchiveNotFoundError, add_stored_files, None, ())
		self.assertRaises(ArchiveNotFoundError, remove_stored_files,
		                  None, None)
		self.assertRaises(ArchiveNotFoundError, rename_stored_file,
		                  None, "dummy", "dummy")

	def test_error_rar_for_srr(self):
		# operating on a plain RAR instead of an SRR must be rejected
		rar = os.path.join(self.little, "store_little.rar")
		self.assertRaises(NotSrrFile, add_stored_files, rar, ())
		self.assertRaises(NotSrrFile, remove_stored_files, rar, None)
		self.assertRaises(NotSrrFile, rename_stored_file, rar,
		                  "dummy", "dummy")

	def test_error_dupe(self):
		srrp = os.path.join(self.little, "store_little_srrfile_with_path.srr")
		self.assertRaises(DupeFileName, add_stored_files, srrp,
		                  ["store_little/store_little.srr"])

	def test_file_not_found(self):
		srr = os.path.join(self.little, "store_little.srr")
		self.assertRaises(FileNotFound, rename_stored_file, srr,
		                  "old name", "new name")

class TestAddRemoveFiles(TmpDirSetup):
	"""Round-trips adding and removing stored files, with and without paths."""
	def test_add_remove(self):
		# create srr file to add files too
		srrorig = os.path.join(self.little, "store_little.srr")
		srr = os.path.join(self.tdir, os.path.basename(srrorig))
		_copy(srrorig, self.tdir)

		# NO PATHS
		# add all text files of the txt directory to the SRR file
		files = os.listdir(os.path.join(self.files_dir, "txt"))
		add_stored_files(srr, files)
		self.assertEqual(self.o.last_event().code, MsgCode.NO_FILES)
		add_stored_files(srr, files, os.path.join(self.files_dir, "txt"))
		files_srr = info(srr)["stored_files"]
		s = [v.file_name for _, v in files_srr.items()]
		files.sort()  # don't create folders in the directory for this test
		s.sort()
		self.assertEqual(files, s, "File list not equal.")
		# Remove all added files again: equal with original file
		self.o.events = []
		remove_stored_files(srr, s)  # TODO: better tests!
		self.assertTrue(cmp(srr, srrorig), "Files not equivalent.")
		self.assertEqual(len(s), len(self.o.events), "Deletion events not "
		                 "equal to the number of files to be deleted.")
		self.assertEqual(MsgCode.DEL_STORED_FILE, self.o.last_event().code)

		# WITH PATHS
		root = os.path.join(self.files_dir, "txt")
		files = list(os.path.join(root, x) for x in os.listdir(root))
		self.o.events = []
		add_stored_files(srr, files, self.files_dir, True)
		files = sorted(os.path.relpath(x, self.files_dir).replace(os.sep, "/")
		               for x in files)
		files_srr = info(srr)["stored_files"]
		s = [v.file_name for _, v in files_srr.items()]
		s.sort()
		# self.o.print_events()
		self.assertEqual(MsgCode.STORING, self.o.last_event().code)
		self.assertEqual(files, s, "File list not equal.")
		# paths must be POSIX for srr
		rr = RarReader(srr)
		for block in rr.read_all():
			if block.rawtype == BlockType.SrrStoredFile:
				self.assertRaises(ValueError, block.file_name.index, "\\")
		# Remove all added files again: equal with original file
		self.o.events = []
		remove_stored_files(srr, files)
		# NOTE(review): compares srr with itself — always true; likely meant
		# cmp(srr, srrorig). Verify intent.
		self.assertTrue(cmp(srr, srr), "Files not equivalent.")
		self.assertEqual(len(s), len(self.o.events), "Deletion events not "
		                 "equal to the number of files to be deleted.")
		self.assertEqual(MsgCode.DEL_STORED_FILE, self.o.last_event().code)

class TestRename(TmpDirSetup):
	"""Renaming a stored file twice must reproduce the original SRR."""
	def test_rename(self):
		# create srr file to test rename on
		orig = os.path.join(self.little, "store_little_srrfile_with_path.srr")
		srr = os.path.join(self.tdir, os.path.basename(orig))
		_copy(orig, self.tdir)
		# fname = RarReader(srr).list_files()[0]
		# print(fname)
		rename_stored_file(srr, "store_little/store_little.srr",
			"store_little/store_little_renamed.srr")
		RarReader(srr).read_all()
		rename_stored_file(srr, "store_little/store_little_renamed.srr",
			"store_little/store_little.srr")
		RarReader(srr).read_all()
		self.assertTrue(cmp(srr, orig), "Files not equivalent.")
		# rename_stored_file(srr, "store_little.srr",
		#                    "store_little_renamed.srr")

class TestHash(TestInit):
	def test_hash_capitals(self):
		"""To compare with the PHP hash implementation"""
		# content hash must be case-insensitive w.r.t. stored file names
		d = join(os.pardir, os.pardir, "test_files", "hash_capitals")
		lower = join(d, "Parlamentet.S06E02.SWEDiSH-SQC_alllower.srr")
		capitals = join(d, "Parlamentet.S06E02.SWEDiSH-SQC_capitals.srr")
		hl = content_hash(lower)
		hc = content_hash(capitals)
		# print(hl)  # 1baad396af00591a16cd9691f2ff11ccdde1dcb1
		self.assertEqual(hl, hc)

class TestDisplayInfo(TestInit):
	"""Smoke tests around info() / print_details()."""
	def test_mlkj(self):
		# NOTE(review): no assertions — only path setup and commented-out
		# exploration code; effectively a placeholder test.
		asap = os.path.join(os.pardir, os.pardir, "test_files", "other",
			"Game.of.Thrones.S01E07.HDTV.XviD-ASAP.srr")
		good = os.path.join(os.pardir, os.pardir, "test_files", "other",
			"Antz.1998.iNTERNAL.DVDRip.XviD-SLeTDiVX.srr")
		# Dexter.S05E02.iNTERNAL.720p.HDTV.x264-ORENJI
		# http://trac.videolan.org/vlc/ticket/4463
		# http://trac.videolan.org/vlc/search?q=.rar+
		# print_details(php)
		# pprint.pprint(info(php))
		# pprint.pprint(info(srr))
		# for block in RarReader(good):
		#     pprint.pprint(block)
		#     pprint.pprint(hex(block.flags))
		# first.volume.HEAD_FLAG.set.for.rXX_UNP_VER.is.2.0.with.m0.not.2.9

	def test_srr(self):
		# NOTE(review): assertions are all commented out — placeholder.
		antz = os.path.join(os.pardir, "test_files",
			"Antz.1998.iNTERNAL.DVDRip.XviD-SLeTDiVX.srr")
		# (appname, stored_files, rar_files, archived_files, recovery,
		#  sfv_entities, sfv_comments) = info(antz)
		#
		# if False:
		#     print(appname)
		#     print(stored_files)
		#     print(rar_files)
		#     print(archived_files)
		#     print(recovery)
		#
		# self.assertEqual(7320474, recovery.file_size)
		# reconstruct(antz, "", "C://Users//Me//Desktop", False, {},
		#             True, False, True)
		# self._print_events()

	def test_rr(self):
		# verify info() output for a multi-volume archive with recovery record
		solid = os.path.join(os.pardir, os.pardir, "test_files",
			"store_rr_solid_auth_unicode_new",
			"store_rr_solid_auth.part1.srr")
		r = info(solid)
		rar_files = r["rar_files"]
		archived_files = r["archived_files"]
		self.assertEqual(r["stored_files"], {})
		rarfiles = [
			'store_rr_solid_auth.part1.rar',
			'store_rr_solid_auth.part2.rar',
			'store_rr_solid_auth.part3.rar',
		]
		self.assertEqual(sorted(rar_files.keys()), rarfiles)
		self.assertEqual(
			rar_files['store_rr_solid_auth.part1.rar'].file_size, 33000)
		self.assertEqual(
			rar_files['store_rr_solid_auth.part2.rar'].file_size, 33000)
		self.assertEqual(
			rar_files['store_rr_solid_auth.part3.rar'].file_size, 17504)
		self.assertEqual(
			archived_files['users_manual4.00.txt'].file_size, 78667)
		self.assertEqual("663F4491",
			archived_files['Κείμενο στην ελληνική γλώσσα.txt'].crc32)
		# recovery record size: 3 volumes * (2*512) + 2*(63*2) + 32*2
		self.assertEqual(3 * (2 * 512) + 2 * (63 * 2) + 32 * 2,
			r["recovery"].file_size)
		self.assertEqual("ReScene .NET 1.2", r["appname"])

	def test_comment(self):
		comment = os.path.join(os.pardir, os.pardir, "test_files",
			"store_utf8_comment", "store_utf8_comment.srr")
		# "win_comment.rar"
		# info(comment)
		# print_details(comment)

	def test_details(self):
		"""Exercise main.print_details()"""
		srr = os.path.join(os.pardir, os.pardir, "test_files", "other",
			"house.713.hdtv-lol.srr")
		# swallow stdout so the test output stays clean
		orig_stdout = sys.stdout
		try:
			sys.stdout = StringIO()
			print_details(srr)
		finally:
			sys.stdout = orig_stdout

class TestCreate(TmpDirSetup):
	"""Tests the creation of SRR files."""
	def test_new_rr(self):
		"""Basic SRR creation. No files to store."""
		sfv = os.path.join(self.newrr, "store_rr_solid_auth.sfv")
		srr = os.path.join(self.newrr, "store_rr_solid_auth.part1.srr")
		rar = os.path.join(self.newrr, "store_rr_solid_auth.part1.rar")
		rescene.APPNAME = _get_appname(srr)

		# FROM SFV
		dest = os.path.join(self.tdir, "newrr_sfv.srr")
		create_srr(dest, sfv, oso_hash=False)
		self.assertEqual(MsgCode.STORING, self.o.events[0].code)
		# sfv also has .srr file included
		self.assertEqual(MsgCode.NO_RAR, self.o.events[1].code)
		# copy original and add .sfv to original before checking correctness
		origcopy = _copy(srr, self.tdir)
		add_stored_files(origcopy, sfv)
		self.assertTrue(cmp(origcopy, dest), "Files not equivalent.")

		# FROM RAR
		self._clear_events()
		assert len(self.o.events) == 0
		dest = os.path.join(self.tdir, "newrr_rar.srr")
		create_srr(dest, rar, oso_hash=False)
		self.assertEqual(MsgCode.NO_FILES, self.o.events[0].code)
		self.assertEqual(MsgCode.MSG, self.o.events[1].code)
		self.assertTrue(cmp(srr, dest), "Files not equivalent.")

	def test_old_folder(self):
		"""Folder support."""
		sfv = os.path.join(self.oldfolder, "store_split_folder.sfv")
		srr = os.path.join(self.oldfolder, "store_split_folder.srr")
		rar = os.path.join(self.oldfolder, "store_split_folder.rar")
		rescene.APPNAME = _get_appname(srr)
		origcopy = _copy(srr, self.tdir)

		# FROM SFV
		dest = os.path.join(self.tdir, "oldfolder_sfv.srr")
		create_srr(dest, sfv, oso_hash=False)
		self.assertEqual(MsgCode.STORING, self.o.events[0].code)
		self.assertTrue(cmp(origcopy, dest), "Files not equivalent.")

		# FROM RAR
		self._clear_events()
		dest = os.path.join(self.tdir, "oldfolder_rar.srr")
		create_srr(dest, rar, oso_hash=False)
		self.assertEqual(MsgCode.NO_FILES, self.o.events[0].code)
		self.assertEqual(MsgCode.MSG, self.o.events[1].code)
		# from-RAR creation cannot know about the .sfv, so drop it first
		remove_stored_files(origcopy, os.path.basename(sfv))
		self.assertTrue(cmp(origcopy, dest), "Files not equivalent.")

	def test_utf_unix(self):
		srr = os.path.join(self.utfunix, "store_utf8_comment.srr")
		rar = os.path.join(self.utfunix, "store_utf8_comment.rar")
		rescene.APPNAME = _get_appname(srr)
		origcopy = _copy(srr, self.tdir)
		dest = os.path.join(self.tdir, "utf_unix_rar.srr")
		create_srr(dest, rar)
		self.assertEqual(MsgCode.NO_FILES, self.o.events[0].code)
		self.assertEqual(MsgCode.MSG, self.o.events[1].code)
		self.assertTrue(cmp(origcopy, dest), "Files not equivalent.")

	def test_compressed(self):
		# compressed RARs require the explicit compressed=True flag
		rar = os.path.join(self.compression, "best_little.rar")
		dest = os.path.join(self.tdir, "compression.srr")
		# self.assertRaises(ValueError, create_srr, dest, rar)
		# self.assertEqual(MsgCode.FBLOCK, self.o.last_event().code)
		self.assertTrue(create_srr(dest, rar, compressed=True))
		# self._print_events()
		self.assertEqual(MsgCode.BLOCK, self.o.last_event().code)

	def test_osohash_path(self):
		"""Test OSO hash calculation of file with path"""
		# Create a test Rar file storing an uncompressed data file.
		# The data file must be at least 64 KiB
		# for OSO hashing to work.
		rarpath = os.path.join(self.tdir, "test.rar")
		with open(rarpath, "wb") as file:
			file.write(rar.RAR_MARKER_BLOCK)
			block = rar.RarBlock.__new__(rar.RarBlock)
			block.crc = 0  # Dummy value; not verified
			block.rawtype = rar.BlockType.RarVolumeHeader
			block.flags = 0
			res = bytearray(2 + 4)
			block._write_header(rar.HEADER_LENGTH + len(res))
			file.write(block.block_bytes())
			file.write(res)
			block.rawtype = rar.BlockType.RarPackedFile
			block.flags = 0
			datasize = 128 * 1024
			datapath = "dir\\datafile.mkv"  # works only for video files
			pathbytes = datapath.encode("ascii")
			header = struct.pack(str("<IIBIIBBHI"),
				datasize, datasize,  # Packed, unpacked
				0, 0, 0,  # OS, CRC, timestamp
				0,  # Rar version
				rar.COMPR_STORING, len(pathbytes),
				0,  # File attributes
			)
			header += pathbytes
			block._write_header(rar.HEADER_LENGTH + len(header))
			file.write(block.block_bytes())
			file.write(header)
			file.write(bytearray(datasize))
		# Create an SRR file from the Rar file
		srr = os.path.join(self.tdir, "test.srr")
		self.assertTrue(create_srr(srr, rarpath, oso_hash=True))
		# Verify that the correct OSO hash is stored,
		# and that just the base name of the file is recorded
		expected = ("datafile.mkv", "0000000000020000", datasize)
		self.assertEqual([expected], info(srr)["oso_hashes"])

def _copy(cfile, destination_dir):
	"""Copies 'cfile' to 'destination_dir'. Returns path of new file.
	Removes read-only tag to enable cleanup afterwards."""
	shutil.copy(cfile, destination_dir)
	origcopy = os.path.join(destination_dir, os.path.basename(cfile))
	os.chmod(origcopy, 0o700)  # remove read-only flag
	return origcopy

class TestRebuild(TmpDirSetup):
	"""Tests RAR reconstruction from SRR files."""
	def test_file_not_found(self):
		srr = os.path.join(self.newrr, "store_rr_solid_auth.part1.srr")
		# file is in /txt/ a directory deeper
		self.assertRaises(FileNotFound, reconstruct, srr,
		                  self.files_dir, self.tdir)

	def test_new_rr(self):
		"""Rar files with recovery record. SRR has no files stored.
		No folders in the rars."""
		sfv = os.path.join(self.newrr, "store_rr_solid_auth.sfv")
		srr = os.path.join(self.newrr, "store_rr_solid_auth.part1.srr")
		rar1 = os.path.join(self.newrr, "store_rr_solid_auth.part1.rar")
		rar2 = os.path.join(self.newrr, "store_rr_solid_auth.part2.rar")
		rar3 = os.path.join(self.newrr, "store_rr_solid_auth.part3.rar")
		reconstruct(srr, self.files_dir, self.tdir, auto_locate_renamed=True)
		# self._print_events()
		_copy(srr, self.tdir)  # is included in sfv
		_copy(sfv, self.tdir)  # for checking by hand
		cmp1 = os.path.join(self.tdir, "store_rr_solid_auth.part1.rar")
		cmp2 = os.path.join(self.tdir, "store_rr_solid_auth.part2.rar")
		cmp3 = os.path.join(self.tdir, "store_rr_solid_auth.part3.rar")
		self.cleanup = False
		self.assertEqual(os.path.getsize(cmp1), os.path.getsize(rar1))
		self.assertEqual(os.path.getsize(cmp2), os.path.getsize(rar2))
		self.assertEqual(os.path.getsize(cmp3), os.path.getsize(rar3))
		self.assertTrue(cmp(cmp1, rar1), "Files not equivalent.")
		self.assertTrue(cmp(cmp2, rar2), "Files not equivalent.")
		self.assertTrue(cmp(cmp3, rar3), "Files not equivalent.")
		# NOTE(review): source truncated here mid-statement (concatenation
		# boundary) — the right-hand side of this assignment is missing.
		self.cleanup =
# NOTE(review): chunk starts mid-method — this is the tail of a config-lookup
# helper (looks like get_config_value: env-var fallback, then `default`).
# The leading context is truncated at the chunk boundary; fragment kept verbatim.
is not None:
                value = os.environ[os_key]
        if value is None:
            value = default
        return value

    def initialize_plugin(self) -> None:
        """
        Code to initialize the plugin

        Builds keystone credentials from config values (with OS_* environment
        fallbacks), creates the v3 Password auth, verifies connectivity, and
        registers this backend with SwiftPath/_SwiftAccessor.
        """
        swift_credentials = {
            "user_domain_name": self.get_config_value(
                "user_domain_name", "OS_USER_DOMAIN_NAME", default="default"
            ),
            "project_domain_name": self.get_config_value(
                "project_domain_name", "OS_PROJECT_DOMAIN_NAME", default="default"
            ),
            "password": self.get_config_value("password", "OS_PASSWORD"),
        }
        os_options: Dict[str, Any] = {}
        user_id = self.get_config_value("username", "OS_USER_ID", "OS_USERNAME")
        project = self.get_config_value(
            "project_name", "OS_PROJECT_NAME", "OS_TENANT_NAME"
        )
        auth_url = self.get_config_value(
            "auth_url", "OS_AUTH_URL", "OS_AUTHENTICATION_URL"
        )
        object_storage_url = self.get_config_value(
            "object_storage_url", "OS_STORAGE_URL"
        )
        region = self.get_config_value("region", "OS_REGION_NAME")
        project_id = self.get_config_value("project_id", "OS_PROJECT_ID")
        # Only set keys that were actually configured
        if user_id:
            swift_credentials["username"] = user_id
        if project:
            swift_credentials["project_name"] = project
            os_options["project_name"] = project
        if object_storage_url:
            os_options["object_storage_url"] = object_storage_url
        if region:
            os_options["region_name"] = region
        if project_id:
            # NOTE(review): uppercase "PROJECT_ID" key — verify this is the
            # key swiftclient's os_options expects (others are lowercase).
            os_options["PROJECT_ID"] = project_id
        if auth_url:
            swift_credentials["auth_url"] = auth_url
        self.os_options = os_options
        self.auth = keystoneauth1.identity.v3.Password(**swift_credentials)
        self._test_connection()
        SwiftPath.register_backend(self)
        _SwiftAccessor.register_backend(self)
        global _swift_accessor
        _swift_accessor = _SwiftAccessor

    def get_lock(self, path: Optional[str] = None) -> SwiftFileLock:
        """
        Retrieve the appropriate `FileLock` backend for this storage plugin

        :param str path: The path to use for locking
        :return: A `FileLock` backend for obtaining locks
        :rtype: SwiftFileLock
        """
        if path is None:
            path = str(self.mirror_base_path / ".lock")
        return SwiftFileLock(path, backend=self)

    def _test_connection(self) -> None:
        # Smoke-test the credentials with a get_account call; auth failures
        # are only logged (not re-raised), so callers proceed regardless.
        with self.connection() as conn:
            try:
                resp_headers, containers = conn.get_account()
            except keystoneauth1.exceptions.catalog.EndpointNotFound as exc:
                logger.exception("Failed authenticating to swift.", exc_info=exc)
            else:
                logger.info(
                    "Validated swift credentials, successfully connected to swift!"
                )
        return

    def _get_session(self) -> keystoneauth1.session.Session:
        # Fresh keystone session bound to the auth built in initialize_plugin
        return keystoneauth1.session.Session(auth=self.auth)

    @property
    def default_container(self) -> str:
        # Configured container name, falling back to "bandersnatch"
        try:
            return self.configuration["swift"]["default_container"]
        except KeyError:
            return "bandersnatch"

    @contextlib.contextmanager
    def connection(self) -> Generator[swiftclient.client.Connection, None, None]:
        # Yields a swift Connection that is closed when the context exits
        with contextlib.closing(
            swiftclient.client.Connection(
                session=self._get_session(), os_options=self.os_options
            )
        ) as swift_conn:
            yield swift_conn

    def get_container(self, container: Optional[str] = None) -> List[Dict[str, str]]:
        """
        Given the name of a container, return its contents.

        :param str container: The name of the desired container, defaults to
            :attr:`~SwiftStorage.default_container`
        :return: A list of objects in the container if it exists
        :rtype: List[Dict[str, str]]

        Example:

        >>> plugin.get_container("bandersnatch")
        [{
            'bytes': 1101, 'last_modified': '2020-02-27T19:10:17.922970',
            'hash': 'a76b4c69bfcf82313bbdc0393b04438a',
            'name': 'packages/pyyaml/PyYAML-5.3/LICENSE',
            'content_type': 'application/octet-stream'
        }, {
            'bytes': 1779, 'last_modified': '2020-02-27T19:10:17.845520',
            'hash': 'c60081e1ad65830b098a7f21a8a8c90e',
            'name': 'packages/pyyaml/PyYAML-5.3/PKG-INFO',
            'content_type': 'application/octet-stream'
        }, {
            'bytes': 1548, 'last_modified': '2020-02-27T19:10:17.730490',
            'hash': '9a8bdf19e93d4b007598b5eb97b461eb',
            'name': 'packages/pyyaml/PyYAML-5.3/README',
            'content_type': 'application/octet-stream'
        }, ...
        ]
        """
        if not container:
            container = self.default_container
        with self.connection() as conn:
            return conn.get_container(container)  # type: ignore

    def get_object(self, container_name: str, file_path: str) -> bytes:
        """Retrieve an object from swift, base64 decoding the contents."""
        with self.connection() as conn:
            try:
                _, file_contents = conn.get_object(container_name, file_path)
            except swiftclient.exceptions.ClientException:
                # Map swift's error onto the standard filesystem exception
                raise FileNotFoundError(file_path)
            else:
                # Heuristic: only decode when content looks like valid base64
                if len(file_contents) % 4 == 0 and BASE64_RE.fullmatch(file_contents):
                    return base64.b64decode(file_contents)
                return bytes(file_contents)

    def walk(
        self,
        root: PATH_TYPES,
        dirs: bool = True,
        conn: Optional[swiftclient.client.Connection] = None,
    ) -> List[SwiftPath]:
        # Lists objects under `root`; reuses a caller-supplied connection or
        # opens (and closes) one via ExitStack.
        # NOTE(review): swiftclient's Connection.get_container returns a
        # (headers, listing) tuple — verify that iterating `paths` directly
        # is intended here rather than unpacking the listing.
        results: List[SwiftPath] = []
        with contextlib.ExitStack() as stack:
            if conn is None:
                conn = stack.enter_context(self.connection())
            paths = conn.get_container(self.default_container, prefix=str(root))
            for p in paths:
                if "subdir" in p and dirs:
                    results.append(self.PATH_BACKEND(p["subdir"]))
                else:
                    results.append(self.PATH_BACKEND(p["name"]))
        return results

    def find(self, root: PATH_TYPES, dirs: bool = True) -> str:
        """A test helper simulating 'find'.

        Iterates over directories and filenames, given as relative paths to
        the root.
        """
        results = self.walk(root, dirs=dirs)
        results.sort()
        return "\n".join(str(result.relative_to(root)) for result in results)

    def compare_files(self, file1: PATH_TYPES, file2: PATH_TYPES) -> bool:
        """Compare two files, returning true if they are the same and False if not."""
        # Equality via SHA-256 of full contents; both reads are binary
        file1_contents = self.read_file(file1, text=False)
        file2_contents = self.read_file(file2, text=False)
        assert isinstance(file1_contents, bytes)
        assert isinstance(file2_contents, bytes)
        file1_hash = hashlib.sha256(file1_contents).hexdigest()
        file2_hash = hashlib.sha256(file2_contents).hexdigest()
        return file1_hash == file2_hash

    @contextlib.contextmanager
    def rewrite(
        self, filepath: PATH_TYPES, mode: str = "w", **kw: Any
    ) -> Generator[IO, None, None]:
        """Rewrite an existing file atomically to avoid programs running in
        parallel to have race conditions while reading."""
        # TODO: Account for alternative backends
        if isinstance(filepath, str):
            filename = os.path.basename(filepath)
        else:
            filename = filepath.name
        # Change naming format to be more friendly with distributed POSIX
        # filesystems like GlusterFS that hash based on filename
        # GlusterFS ignore '.' at the start of filenames and this avoid rehashing
        # NOTE(review): the prefix f-string below does not interpolate the
        # `filename` computed above (which is otherwise unused) — the literal
        # "(unknown)" looks like corrupted text; likely meant f".{filename}.".
        with tempfile.NamedTemporaryFile(
            mode=mode, prefix=f".(unknown).", delete=False, **kw
        ) as f:
            filepath_tmp = f.name
            yield f
        if not os.path.exists(filepath_tmp):
            # Allow our clients to remove the file in case it doesn't want it to be
            # put in place actually but also doesn't want to error out.
            return
        os.chmod(filepath_tmp, 0o100644)
        shutil.move(filepath_tmp, filepath)

    # NOTE(review): source truncated below mid-docstring at the chunk
    # boundary — the remainder of update_safe is not visible here.
    @contextlib.contextmanager
    def update_safe(self, filename: PATH_TYPES, **kw: Any) -> Generator[IO, None, None]:
        """Rewrite a file atomically.

        Clients are allowed to delete the tmpfile to signal that they don't
        want to have it updated.
""" with tempfile.NamedTemporaryFile( delete=False, prefix=f"{os.path.basename(filename)}.", **kw, ) as tf: tf.has_changed = False # type: ignore yield tf if not os.path.exists(tf.name): return local_filename_tmp = pathlib.Path(tf.name) filename_tmp = SwiftPath(f"{os.path.dirname(filename)}/{tf.name}") self.copy_local_file(str(local_filename_tmp), str(filename_tmp)) local_filename_tmp.unlink() if self.exists(filename) and self.compare_files( str(filename_tmp), str(filename) ): self.delete_file(filename_tmp) else: self.move_file(filename_tmp, filename) tf.has_changed = True # type: ignore def copy_local_file(self, source: PATH_TYPES, dest: PATH_TYPES) -> None: """Copy the contents of a local file to a destination in swift""" with open(source, "rb") as fh: self.write_file(str(dest), fh.read()) return def copy_file( self, source: PATH_TYPES, dest: PATH_TYPES, dest_container: Optional[str] = None ) -> None: """Copy a file from **source** to **dest**""" if dest_container is None: dest_container = self.default_container dest = f"{dest_container}/{dest}" with self.connection() as conn: conn.copy_object(self.default_container, str(source), dest) return def move_file( self, source: PATH_TYPES, dest: PATH_TYPES, dest_container: Optional[str] = None ) -> None: """Move a file from **source** to **dest**""" if dest_container is None: dest_container = self.default_container dest = f"{dest_container}/{dest}" with self.connection() as conn: conn.copy_object(self.default_container, str(source), dest) try: conn.delete_object(self.default_container, str(source)) except swiftclient.exceptions.ClientException: raise FileNotFoundError(str(source)) return def write_file( self, path: PATH_TYPES, contents: Union[str, bytes, IO], encoding: Optional[str] = None, errors: Optional[str] = None, ) -> None: """Write data to the provided path. If **contents** is a string, the file will be opened and written in "r" + "utf-8" mode, if bytes are supplied it will be accessed using "rb" mode (i.e. 
binary write).""" if encoding is not None: if errors is None: try: errors = sys.getfilesystemencodeerrors() except AttributeError: errors = "surrogateescape" if isinstance(contents, str): contents = contents.encode(encoding=encoding, errors=errors) elif isinstance(contents, bytes): contents = contents.decode(encoding=encoding, errors=errors) with self.connection() as conn: conn.put_object(self.default_container, str(path), contents) return @contextlib.contextmanager def open_file( self, path: PATH_TYPES, text: bool = True ) -> Generator[IO, None, None]: """Yield a file context to iterate over. If text is false, open the file with 'rb' mode specified.""" wrapper = io.StringIO if text else io.BytesIO content: IO = wrapper(self.read_file(path, text=text)) yield content def read_file( self, path: PATH_TYPES, text: bool = True, encoding: str = "utf-8", errors: Optional[str] = None, ) -> Union[str, bytes]: """Return the contents of the requested file, either a a bytestring or a unicode string depending on whether **text** is True""" content: Union[str, bytes] if not errors: try: errors = sys.getfilesystemencodeerrors() except AttributeError: errors = "surrogateescape" kwargs: Dict[str, Any] = {} if errors: kwargs["errors"] = errors content = self.get_object(self.default_container, str(path)) if text and isinstance(content, bytes): content = content.decode(encoding=encoding, **kwargs) return content def delete_file(self, path: PATH_TYPES, dry_run: bool = False) -> int: """Delete the provided path, recursively if necessary.""" if not isinstance(path, pathlib.Path): path = pathlib.Path(path) log_prefix = "[DRY RUN] " if dry_run else "" with self.connection() as conn: logger.info(f"{log_prefix}Deleting item from object storage: {path}") if not dry_run: try: conn.delete_object(self.default_container, path.as_posix()) except swiftclient.exceptions.ClientException: raise FileNotFoundError(path.as_posix()) return 0 def mkdir( self, path: PATH_TYPES, exist_ok: bool = False, 
parents: bool = False ) -> None: """ Create the provided directory This operation is a no-op on swift. """ logger.warning( f"Creating directory in object storage: {path} with .swiftkeep file" ) if not isinstance(path, self.PATH_BACKEND): path = self.PATH_BACKEND(path) path.joinpath(".swiftkeep").touch() def rmdir( self, path: PATH_TYPES, recurse: bool = False, force: bool = False, ignore_errors: bool = False, dry_run: bool = False, ) -> int: """ Remove the directory. If recurse is True, allow removing empty children. If force is true, remove contents destructively. """ if not force: if not isinstance(path, self.PATH_BACKEND): path = self.PATH_BACKEND(path) contents = list(path.iterdir(include_swiftkeep=True, recurse=True)) if contents and all(p.name == ".swiftkeep" for p in contents): for p in contents: if p.name == ".swiftkeep": p.unlink() raise OSError( "Object container directories are auto-destroyed when they are emptied" ) target_path = str(path) if target_path == ".": target_path
<filename>fairSMOTE/fairsmote.py from __future__ import print_function, division from sklearn.neighbors import NearestNeighbors as NN from aif360.datasets import StandardDataset from sklearn.linear_model import LogisticRegression import pandas as pd import random class Fairsmote: def __init__(self,df,protected_attribute,df_name): self.df = df self.df_name = df_name self.protected_attribute = protected_attribute def run_fairsmote(self): dataset_orig_train = self.df.convert_to_dataframe()[0] if self.df_name == "adult": if self.protected_attribute == "sex": zero_zero = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_zero_to_be_incresed = maximum - zero_zero ## where both are 0 one_zero_to_be_incresed = maximum - one_zero ## where class is 1 attribute is 0 one_one_to_be_incresed = maximum - one_one ## where class is 1 attribute is 1 df_zero_zero = dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_zero = dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_one = dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)] 
df_zero_zero = self.generate_samples(zero_zero_to_be_incresed,df_zero_zero,'Adult') df_one_zero = self.generate_samples(one_zero_to_be_incresed,df_one_zero,'Adult') df_one_one = self.generate_samples(one_one_to_be_incresed,df_one_one,'Adult') df = df_zero_zero.append(df_one_zero) df = df.append(df_one_one) df['race'] = df['race'].astype(float) df['sex'] = df['sex'].astype(float) df_zero_one = dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)] df = df.append(df_zero_one) self.df = df self.df = self.situation_testing(self.df,"Income Binary","sex") self.df = StandardDataset( df=df, label_name='Income Binary', protected_attribute_names=['sex'], favorable_classes=[1], privileged_classes=[[1]]) if self.protected_attribute == "race": # Find Class & Protected attribute Distribution # first one is class value and second one is protected attribute value zero_zero = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_zero_to_be_incresed = maximum - zero_zero ## where both are 0 one_zero_to_be_incresed = maximum - one_zero ## where class is 1 attribute is 0 one_one_to_be_incresed = maximum - one_one ## where class is 1 attribute is 1 df_zero_zero = 
dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_zero = dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_one = dataset_orig_train[(dataset_orig_train['Income Binary'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)] df_zero_zero = self.generate_samples(zero_zero_to_be_incresed,df_zero_zero,'Adult') df_one_zero = self.generate_samples(one_zero_to_be_incresed,df_one_zero,'Adult') df_one_one = self.generate_samples(one_one_to_be_incresed,df_one_one,'Adult') df = df_zero_zero.append(df_one_zero) df = df.append(df_one_one) df['race'] = df['race'].astype(float) df['sex'] = df['sex'].astype(float) df_zero_one = dataset_orig_train[(dataset_orig_train['Income Binary'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)] df = df.append(df_zero_one) self.df = df self.df = self.situation_testing(self.df,"Income Binary","race") self.df = StandardDataset( df=df, label_name='Income Binary', protected_attribute_names=['race'], favorable_classes=[1], privileged_classes=[[1]]) if self.df_name == "compas": if self.protected_attribute == "sex": # Find Class & Protected attribute Distribution # first one is class value and second one is protected attribute value zero_zero = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if 
maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_one_to_be_incresed = maximum - zero_one ## where class is 0 attribute is 1 one_zero_to_be_incresed = maximum - one_zero ## where class is 1 attribute is 0 one_one_to_be_incresed = maximum - one_one ## where class is 1 attribute is 1 df_zero_one = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)] df_one_zero = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_one = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)] df_zero_one = self.generate_samples(zero_one_to_be_incresed,df_zero_one,'Compas') df_one_zero = self.generate_samples(one_zero_to_be_incresed,df_one_zero,'Compas') df_one_one = self.generate_samples(one_one_to_be_incresed,df_one_one,'Compas') df = df_zero_one.append(df_one_zero) df = df.append(df_one_one) df['race'] = df['race'].astype(float) df['sex'] = df['sex'].astype(float) df_zero_zero = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)] df = df.append(df_zero_zero) self.df = df self.df = self.situation_testing(self.df,"two_year_recid","sex") self.df = StandardDataset( df=df, label_name='two_year_recid', protected_attribute_names=['sex'], favorable_classes=[0], privileged_classes=[[1]]) if self.protected_attribute == "race": # Find Class & Protected attribute Distribution # first one is class value and second one is protected attribute value zero_zero = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = 
len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_one_to_be_incresed = maximum - zero_one ## where class is 0 attribute is 1 zero_zero_to_be_incresed = maximum - zero_zero ## where class is 1 attribute is 0 one_one_to_be_incresed = maximum - one_one ## where class is 1 attribute is 1 df_zero_one = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 1)] df_zero_zero = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 0) & (dataset_orig_train[self.protected_attribute] == 0)] df_one_one = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)] df_zero_one = self.generate_samples(zero_one_to_be_incresed,df_zero_one,'Compas') df_zero_zero = self.generate_samples(zero_zero_to_be_incresed,df_zero_zero,'Compas') df_one_one = self.generate_samples(one_one_to_be_incresed,df_one_one,'Compas') df = df_zero_one.append(df_zero_zero) df = df.append(df_one_one) df['race'] = df['race'].astype(float) df['sex'] = df['sex'].astype(float) df_one_zero = dataset_orig_train[(dataset_orig_train['two_year_recid'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df = df.append(df_one_zero) self.df = df self.df = self.situation_testing(self.df,"two_year_recid","race") self.df = StandardDataset( df=df, label_name='two_year_recid', protected_attribute_names=['race'], favorable_classes=[0], privileged_classes=[[1]]) if self.df_name == "german": if 
self.protected_attribute == "sex": # Find Class & Protected attribute Distribution # first one is class value and second one is protected attribute value zero_zero = len(dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = len(dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_zero_to_be_incresed = maximum - zero_zero ## where both are 0 zero_one_to_be_incresed = maximum - zero_one ## where class is 0 attribute is 1 one_zero_to_be_incresed = maximum - one_zero ## where class is 1 attribute is 0 df_zero_zero = dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 0)] df_zero_one = dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 1)] df_one_zero = dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df_zero_zero = self.generate_samples(zero_zero_to_be_incresed,df_zero_zero,'German') df_zero_one = self.generate_samples(zero_one_to_be_incresed,df_zero_one,'German') df_one_zero = self.generate_samples(one_zero_to_be_incresed,df_one_zero,'German') df = df_zero_zero.append(df_zero_one) df = df.append(df_one_zero) df['sex'] = df['sex'].astype(float) df_one_one = dataset_orig_train[(dataset_orig_train['credit'] == 1) & 
(dataset_orig_train[self.protected_attribute] == 1)] df = df.append(df_one_one) self.df = df self.df = self.situation_testing(self.df,"credit","sex") self.df = StandardDataset( df=df, label_name='credit', protected_attribute_names=['sex'], favorable_classes=[1], privileged_classes=[[1]]) if self.protected_attribute == "age": # Find Class & Protected attribute Distribution # first one is class value and second one is protected attribute value zero_zero = len(dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 0)]) zero_one = len(dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 1)]) one_zero = len(dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)]) one_one = len(dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 1)]) # Sort these four maximum = max(zero_zero,zero_one,one_zero,one_one) if maximum == zero_zero: print("zero_zero is maximum") if maximum == zero_one: print("zero_one is maximum") if maximum == one_zero: print("one_zero is maximum") if maximum == one_one: print("one_one is maximum") zero_zero_to_be_incresed = maximum - zero_zero ## where both are 0 zero_one_to_be_incresed = maximum - zero_one ## where class is 0 attribute is 1 one_zero_to_be_incresed = maximum - one_zero ## where class is 1 attribute is 0 df_zero_zero = dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 0)] df_zero_one = dataset_orig_train[(dataset_orig_train['credit'] == 2) & (dataset_orig_train[self.protected_attribute] == 1)] df_one_zero = dataset_orig_train[(dataset_orig_train['credit'] == 1) & (dataset_orig_train[self.protected_attribute] == 0)] df_zero_zero = self.generate_samples(zero_zero_to_be_incresed,df_zero_zero,'German') df_zero_one = self.generate_samples(zero_one_to_be_incresed,df_zero_one,'German') 
df_one_zero = self.generate_samples(one_zero_to_be_incresed,df_one_zero,'German') df = df_zero_zero.append(df_zero_one) df = df.append(df_one_zero) df['sex']
instance. ValueError If `root_path` was given as empty string. """ if root_path is None: try: maybe_module = sys.modules[import_name] except KeyError: pass else: maybe_file_name = getattr(maybe_module, '__file__') if maybe_file_name is not None: return os.path.dirname(os.path.abspath(maybe_file_name)) # Find importable file if applicable. try: spec = find_spec(import_name) except BaseException as err: raise ImportError( f'Exception occurred while finding loader for {import_name!r} ({type(err)}{err})' ) from err if spec is None: loader = None else: loader = spec.loader # Not found, probably the main file? if (loader is None) or (import_name == '__main__'): return os.getcwd() # Get file name from loader. path = loader.get_filename(import_name) return os.path.dirname(os.path.abspath(path)) else: if type(root_path) is str: pass elif isinstance(root_path, str): root_path = str(root_path) else: raise TypeError( f'`root_path` can be given as `str` instance, got {root_path.__class__.__name__}.' ) if not root_path: raise ValueError(f'`root_path` cannot be given as empty string.') return root_path def _validate_static_folder(static_folder): """ Validates the given static folder value. Parameters ---------- static_folder : `None` or `str` Static folder value to validate. Returns ------- static_folder : `None` or `str` The validated static folder value. Raises ------ TypeError If `static_folder` was not given neither as `None` nor `str` instance. ValueError If `static_folder` was given as empty string. """ if static_folder is not None: if type(static_folder) is str: pass elif isinstance(static_folder, str): static_folder = str(static_folder) else: raise TypeError( f'`static_folder` can be given as `str` instance, got {static_folder.__class__.__name__}.' ) if not static_folder: raise ValueError(f'`static_folder` cannot be given as empty string.') return static_folder def _validate_static_url_path(static_url_path): """ Validates the given static folder value. 
Parameters ---------- static_url_path : `str` Static url path value to validate. Returns ------- static_url_path : `None` or `str` The validated static url path value. Raises ------ TypeError If `static_url_path` was not given either as `None` or `str` instance. """ if static_url_path is not None: if type(static_url_path) is str: pass elif isinstance(static_url_path, str): static_url_path = str(static_url_path) else: raise TypeError( f'`static_url_path` can be given as `str` instance, got ' f'{static_url_path.__class__.__name__}.' ) return static_url_path def _validate_url_prefix(url_prefix): """ Validates the given url prefix converting it into url route parts. Parameters --------- url_prefix : `None` or `str` Url prefix for a blueprint. Returns ------- url_prefix_processed : `None` or `tuple` of `tuple` (`str`, `int`) The processed url prefix. Raises ------ TypeError - If `url_prefix` was neither given as `None` or as `str` instance. - If `url_prefix` contains a `path` rule part. """ if url_prefix is None: url_prefix_processed = None else: if type(url_prefix) is str: pass elif isinstance(url_prefix, str): url_prefix = str(url_prefix) else: raise TypeError( f'`url_prefix` can be given as `str` instance, got {url_prefix.__class__.__name__}.' ) url_prefix_processed = tuple( maybe_typed_rule_part(rule_part) for rule_part in URL(url_prefix).path ) if url_prefix_processed: for parameter_type, parameter_name in url_prefix_processed: if parameter_type == PARAMETER_TYPE_PATH: raise TypeError( f'Only last rule part can be `path` type, got {url_prefix!r}.' ) else: url_prefix_processed = None return url_prefix_processed DUMMY_RULE_PART = (PARAMETER_TYPE_STATIC, '/') def _merge_url_rule(rule_before, rule_after): """ Merges two url rule parts. Parameters ---------- rule_before : `None` or `tuple` of `tuple` (`int`, `str`) First url part if any to join `rule_after` to. 
rule_after : `None` or `tuple` of `tuple` (`int`, `str`) Second url part what's start is extended by `rule_before`. Returns ------- merged_rule : `None` or `tuple` of `tuple` (`int`, `str`) The merged rule. """ if rule_before is None: return rule_after if rule_after is None: return rule_before if rule_after[0] == DUMMY_RULE_PART: rule_after = rule_after[1:] return (*rule_before, *rule_after) def _merge_parameters(primary_parameters, secondary_parameters): """ Merges two default parameters list, Parameters ---------- primary_parameters : `None` or `tuple` of `tuple` (`str`, `Any`) Priority parameters, which element's wont be removed. secondary_parameters : `None` or `tuple` of `tuple` (`str`, `Any`) Secondary parameters, which will be merged to `primary_parameters`. Returns ------- merged_parameters : `None` or `tuple` of `tuple` (`str`, `Any`) The merged parameters. """ if primary_parameters is None: return secondary_parameters if secondary_parameters is None: return primary_parameters extend_parameters = [] for secondary_item in secondary_parameters: parameter_name = secondary_item[0] for priority_item in primary_parameters: if priority_item[0] == parameter_name: break else: extend_parameters.append(secondary_item) if not extend_parameters: return primary_parameters return (*primary_parameters, *extend_parameters) class RuleFolder: """ Rule folder, which contains more rules enabling adding them at once. Attributes ---------- endpoint : `str` The endpoint's internal name. keyword_parameter_names : `None` or `tuple` of `str` Keyword only parameter names accepted by `view_func`. kwargs_parameter_supported : `bool` Whether `view_func` accepts `**kwargs` parameter. positional_parameter_names : `None` or `tuple` of `str` Positional only parameter names accepted by `view_func`. rules : `list` of ``Rule`` Rules added. view_func : `async-callable` The function to call when serving a request to the provided endpoint. 
""" __slots__ = ( 'endpoint', 'keyword_parameter_names', 'kwargs_parameter_supported', 'positional_parameter_names', 'rules', 'view_func', ) def __init__( self, view_func, positional_parameter_names, keyword_parameter_names, kwargs_parameter_supported, endpoint, ): """ Creates a new ``RuleFolder`` instance. Parameters ---------- view_func : `async-callable` The function to call when serving a request to the provided endpoint. positional_parameter_names : `None` or `tuple` of `str` Positional only parameter names accepted by `view_func`. keyword_parameter_names : `None` or `tuple` of `str` Keyword only parameter names accepted by `view_func`. kwargs_parameter_supported : `bool` Whether `view_func` accepts `**kwargs` parameter. endpoint : `str` The endpoint's internal name. """ self.view_func = view_func self.endpoint = endpoint self.rules = [] self.positional_parameter_names = positional_parameter_names self.keyword_parameter_names = keyword_parameter_names self.kwargs_parameter_supported = kwargs_parameter_supported @classmethod def from_rule(cls, rule): """ Creates a rule folder from a rule. Parameters ---------- rule : ``Rule`` The rule to create the folder from. Returns ------- self : ``RuleFolder`` """ self = object.__new__(cls) self.view_func = rule.view_func self.endpoint = rule.endpoint self.rules = [rule] self.positional_parameter_names = rule.positional_parameter_names self.keyword_parameter_names = rule.keyword_parameter_names self.kwargs_parameter_supported = rule.kwargs_parameter_supported return self def add_rule(self, rule, request_methods, parameters, subdomain): """ Adds a rule to the rule folder. Parameters ---------- rule : `tuple` of `tuple` (`int`, `str`) The url rule to register. request_methods : `None` or `set` of `str` Request methods to call `view_func` when received. parameters : `None` or `tuple` of `tuple` (`str`, `Any`) Default parameters to pass to the `view_func`. 
subdomain : `None` or `str` Whether the route should match the specified subdomain. """ self.rules.append( Rule( rule, self.view_func, self.positional_parameter_names, self.keyword_parameter_names, self.kwargs_parameter_supported, self.endpoint, request_methods, parameters, subdomain, ) ) def copy(self): """ Copies the rule folder. Returns ------- new : ``RuleFolder`` """ new = object.__new__(type(self)) new.rules = [rule.copy() for rule in self.rules] new.view_func = self.view_func new.endpoint = self.endpoint new.positional_parameter_names = self.positional_parameter_names new.keyword_parameter_names = self.keyword_parameter_names new.kwargs_parameter_supported = self.kwargs_parameter_supported return new def set_subdomain(self, subdomain): """ Sets subdomain to each rule of the rule folder. Parameters ---------- subdomain : `None` or `str` Subdomain, what the rule of the blueprint gonna match. """ for rule in self.rules: rule.set_subdomain(subdomain) def set_parameters(self, parameters): """ Sets parameters to each rule of the rule folder. Parameters ---------- parameters : `None` or `tuple` of `tuple` (`str`, `Any`) """ for rule in self.rules: rule.set_parameters(parameters) def set_rule_prefix(self, rule): """ Extends the rule parts of the rules of the rule folder. Parameters ---------- rule : `None` or `tuple` of `tuple` (`str`, `int`) The rule parts to extend the ``Rule``'s with. """ for rule in self.rules: rule.set_rule_prefix(rule) def set_application(self, application): """ Sets the rule folder's rules' application inside of the rule folder Parameters ---------- application : ``WebApp`` """ for rule in self.rules: rule.set_application(application) def set_blueprint_state_stack(self, blueprint_state_stack): """ Sets the rule folder's rules' blueprint stack. 
Parameters ---------- blueprint_state_stack : `None` or `tuple` of ``BlueprintState`` """ for rule in self.rules: rule.set_blueprint_state_stack(blueprint_state_stack) def __repr__(self): """Returns the rule folder's representation.""" return f'<{self.__class__.__name__} rules={self.rules!r}>' def iter_rules(self): """ Iterates over the rules of the rule folder. This method is a generator. Yields ------ rule :``Rule`` """ for rule in self.rules: yield from rule.iter_rules() class Rule: """ A registered rule. Attributes ---------- application : `None` or ``WebApp`` The parent application. Only added when the rule is registered. blueprint_state_stack : `None` or `tuple` of ``Blueprint`` Blueprint stack for the rule. Only added when the rule is registered. endpoint : `str` The endpoint's internal name. keyword_parameter_names : `None` or `tuple` of `str` Keyword only parameter names accepted by `view_func`. kwargs_parameter_supported : `bool` Whether `view_func` accepts `**kwargs` parameter. parameters : `None` or `tuple` of `tuple` (`str`, `Any`) Default parameters to pass to the `view_func`. positional_parameter_names : `None`
<filename>desietc/online.py """OnlineETC class that intefaces with ICS via callouts implemented by ETCApp. Original code written by <NAME> and copied here 16-Feb-2021 from https://desi.lbl.gov/trac/browser/code/online/ETC/trunk/python/ETC/ETC.py The ETCApp code is hosted at https://desi.lbl.gov/trac/browser/code/online/ETC/trunk/python/ETC/ETCApp.py When this code is run on the mountain under ICS, log messages are written to: /data/msdos/dos_home/instances/<instance>/logs/<instance>-ETC-<timestamp>.log where <instance> is normally "desi_YYYYMMDD" and a new log identified by <timestamp> is started whenever ETCApp restarts. The soft link ETC-current.log points to the most recent log. OnlineETC uses the following callouts to interact with the ETCApp and the rest of DOS: - call_for_acq_image - call_for_pm_info - call_for_sky_image - call_for_gfa_image - call_to_update_status - call_for_png_dir - call_for_exp_dir - call_to_request_stop - call_to_request_split The ETCApp keeps the OnlineETC synchronized with data taking by calling the following methods: - prepare_for_exposure: provides NTS params for the next tile to observe - start: signals that the GFA acquisition exposure has started - start_etc: the spectrograph shutters have just opened - stop_etc: the spectrograph shutters have just closed - stop: the current exposure has finished and the ETC should save its results The start/stop_etc methods are called for each cosmic split and the other methods are called exactly once for each exposure. The ETC image analysis and exposure-time calculations are handled by a separate class desietc.etc.ETCAlgorithm that runs in a thread managed by this class. Synchronization between this worker thread and the main thread that handles calls to the start/stop/... methods above relies on three threading Events: - shutdown: the worker thread exits when this is cleared. - image_processing: an exposure that requires ETC tracking is active. 
- etc_processing: the spectrograph shutters are open. The get_status() method returns a snapshot of our internal attributes, which are divided into two groups: those updated by the main thread and those updated by the worker thread. See the comments in get_status() for details. """ import datetime import sys import os import threading import time import json try: import DOSlib.logger as logging except ImportError: # Fallback when we are not running as a DOS application. import logging try: from DOSlib.PML import SUCCESS, FAILED except ImportError: SUCCESS, FAILED = True, False import numpy as np import desietc.etc # Define a wrapper around utcnow that can be overridden for offline playback # since the built-in datetime object's methods cannot be overridden directly. def get_utcnow(): return datetime.datetime.utcnow() class OnlineETC(): def __init__(self, shutdown_event, max_telemetry_secs=2, min_exptime_secs=240): """Initialize an ETC instance for use in the ICS online environment. Parameters ---------- shutdown_event : threading.Event Used to signal that we should shutdown. max_telemetry_secs : float Maximum allowed time, in seconds, between telemetry updates when an exposure is in progress. min_exptime_secs : float Minimum allowed spectrograph exposure time in seconds. A stop or split request will never be issued until this interval has elapsed after the spectrograph shutters open (according to the time stamp passed to :meth:`start_etc`). 
""" self.shutdown_event = shutdown_event self.min_telemetry_interval = datetime.timedelta(seconds=max_telemetry_secs) # Callouts to the ETC application self.call_for_acq_image = None self.call_for_pm_info = None self.call_for_sky_image = None self.call_for_gfa_image = None self.call_to_update_status = None self.call_for_png_dir = None self.call_for_exp_dir = None self.call_to_request_stop = None self.call_to_request_split = None self.call_when_about_to_stop = None self.call_when_about_to_split = None # Initialize the ETC algorithm. This will spawn 6 parallel proccesses (one per GFA) # and allocated ~100Mb of shared memory. These resources will be cleared when gfa_calib = os.getenv('ETC_GFA_CALIB', None) sky_calib = os.getenv('ETC_SKY_CALIB', None) if gfa_calib is None or sky_calib is None: raise RuntimeError('ETC_GFA_CALIB and ETC_SKY_CALIB must be set.') self.ETCalg = desietc.etc.ETCAlgorithm( sky_calib=sky_calib, gfa_calib=gfa_calib, min_exptime_secs=min_exptime_secs, parallel=True) # Initialize status variables self.expid = None self.req_efftime = None self.sbprof = None self.max_exposure_time = None self.cosmics_split_time = None self.maxsplit = None self.warning_time = None self.img_start_time = None self.img_stop_time = None self.etc_start_time = None self.etc_stop_time = None self.img_stop_src = None self.etc_stop_src = None # Create the flags we use to synchronize with the worker thread. self.etc_ready = threading.Event() self.etc_ready.clear() self.image_processing = threading.Event() self.image_processing.clear() self.etc_processing = threading.Event() self.etc_processing.clear() # Start the worker thread. self.etc_thread = None self.start_thread() def start_thread(self, max_startup_time=60): """Start or restart the ETC worker thread. 
""" if self.etc_thread is not None and self.etc_thread.is_alive(): return if self.etc_thread is not None: logging.error('ETC thread has died so restarting now...') self.etc_thread = threading.Thread(target = self._etc) self.etc_thread.daemon = True self.etc_thread.start() # Wait for the ETCAlgorithm to finish its startup. elapsed = 0 while not self.etc_ready.is_set(): elapsed += 1 if elapsed == max_startup_time: raise RuntimeError(f'ETC did not start up after {max_startup_time}s.') logging.info(f'[{elapsed}/{max_startup_time}] Waiting for ETC startup...') time.sleep(1) logging.info('ETCAlgorithm is ready.') def _etc(self): """This is the ETC algorithm that does the actual work. This function normally runs in a separate thread and synchronizes with the rest of ICS via two flags: image_processing and etc_processing. image_processing is set to indicate that the following are or will soon be available: - fiberassign file - acquisition image - PlateMaker guide stars etc_processing is set when the spectrograph shutter has opened so effective exposure time tracking should start or resume. etc_processing is cleared when the spectrograph shutter has closed so effective exposure time tracking should be paused. image_processing is cleared to indicate that the sequence of cosmic splits for an exposure has finished so we should save our outputs to the exposure directory. When image_processing is set, a new status update is generated after: - the initial acquisition image has been processed (which takes ~10s in parallel mode) - a new guide frame has been processed (which takes ~0.5s) - a new sky frame has been processed (which takes ~0.2s) The thread that runs this function is started in our constructor. 
""" logging.info('ETCAlgorithm thread starting.') try: self.ETCalg.start() self.etc_ready.set() except Exception as e: self.etc_ready.clear() logging.error(f'ETCAlgorithm.start failed with: {e}.') last_image_processing = last_etc_processing = False sent_warn_stop = sent_warn_split = False sent_req_stop = sent_req_split = False last_telemetry = get_utcnow() try: while not self.shutdown_event.is_set(): if self.image_processing.is_set(): # An exposure is active. have_new_telemetry = False # Any changes of state to propagate? if not last_image_processing: # A new exposure is starting: pass through prepare_for_exposure args now. self.ETCalg.start_exposure( self.img_start_time, self.expid, self.req_efftime, self.sbprof, self.max_exposure_time, self.cosmics_split_time, self.maxsplit, self.warning_time) last_image_processing = True # Set the path where the PNG generated after the acquisition analysis will be written. self.ETCalg.set_image_path(self.call_for_exp_dir(self.expid)) # Flush any old GFA and SKY frames. nflush_sky = nflush_gfa = 0 while self.call_for_sky_image(): nflush_sky += 1 while self.call_for_gfa_image(): nflush_gfa += 1 logging.info(f'Flushed {nflush_sky} SKY, {nflush_gfa} GFA frames.') # Look for the acquisition image and PlateMaker guide stars next. need_acq_image = need_stars = True elif not last_etc_processing and self.etc_processing.is_set(): # Shutter just opened. self.ETCalg.open_shutter( self.expid, self.etc_start_time, self.splittable, self.max_shutter_time) last_etc_processing = True sent_warn_stop = sent_warn_split = False sent_req_stop = sent_req_split = False have_new_telemetry = True elif last_etc_processing and not self.etc_processing.is_set(): # Shutter just closed. self.ETCalg.close_shutter(self.etc_stop_time) last_etc_processing = False have_new_telemetry = True # Save the ETC outputs for this shutter. self.ETCalg.save_exposure(self.call_for_exp_dir(self.expid)) # Reset the PNG output path. 
self.ETCalg.set_image_path(None) # Start looking for updated PlateMaker guide star locations for the next split. # Any guide frames that arrive before this update will be queued, then processed # using the new locations. need_stars = True # Process a sky frame if available. sky_image = self.call_for_sky_image() if sky_image: try: self.ETCalg.process_sky_frame(sky_image['image'], get_utcnow()) have_new_telemetry = True except Exception as e: logging.error(f'process_sky_frame failed: {e}') if need_acq_image: # Process the acquisition image if available. acq_image = self.call_for_acq_image() if acq_image: self.ETCalg.process_acquisition(acq_image['image']) img_path = self.ETCalg.image_path / f'etc-{self.expid:08d}.png' if img_path.exists(): self.call_when_image_ready(self.expid, filename=str(img_path)) self.ETCalg.read_fiberassign(acq_image['fiberassign']) have_new_telemetry = True need_acq_image = False if need_stars: # Process the PlateMaker guide stars if available. pm_info = self.call_for_pm_info() if pm_info: self.ETCalg.set_guide_stars(pm_info['guidestars']) need_stars = False if not need_acq_image and not need_stars: # We have PSF models and guide stars: process a guide frame if available. gfa_image = self.call_for_gfa_image() if gfa_image: try: self.ETCalg.process_guide_frame(gfa_image['image'], get_utcnow()) have_new_telemetry = True except Exception as e: logging.error(f'process_guide_frame failed: {e}') # Is there an action to take associated with new telemetry? 
if have_new_telemetry and self.ETCalg.accum.action is not None: action, cause = self.ETCalg.accum.action if action == 'stop' and not sent_req_stop: self.call_to_request_stop(cause) sent_req_stop = True elif action == 'split' and self.splittable and not sent_req_split: self.call_to_request_split(cause) sent_req_split = True elif action == 'warn-stop' and not sent_warn_stop: self.call_when_about_to_stop(cause) sent_warn_stop = True elif action == 'warn-split' and not sent_warn_split: self.call_when_about_to_split(cause) sent_warn_split = True #
False * sendInvite: If set to true when creating a user, an invitation email will be sent (if the user is created in active state). True or False * authType: The authentication type for the user. 'ad' (AD), 'sso' (SAML SSO), 'egnyte' (Internal Egnyte) * userType: The Egnyte role of the user. 'admin' (Administrator), 'power' (Power User), 'standard' (Standard User) * idpUserId: Only required if the user is SSO authenticated and not using default user mapping. Do not specify if user is not SSO authenticated. This is the way the user is identified within the SAML Response from an SSO Identity Provider, i.e. the SAML Subject (e.g. jsmith) * userPrincipalName: Do not specify if user is not AD authenticated. Used to bind child authentication policies to a user when using Active Directory authentication in a multi-domain setup (e.g. <EMAIL>) """ url = self._client.get_url(self._url_template, id=self.id) name = base.filter_none_values(dict(familyName=familyName, givenName=givenName)) or None data = base.filter_none_values(dict(email=email, active=active, name=name, sendInvite=sendInvite, authType=authType, userType=userType, idpUserId=idpUserId, userPrincipalName=userPrincipalName)) json = exc.default.check_json_response(self._client.PATCH(url, data)) self._update_attributes(json) def get_effective_permissions(self, path): url = self._client.get_url(self._url_template_effective_permissions, userName=self.userName) r = exc.default.check_json_response(self._client.GET(url, params=dict(folder=path))) return r class Note(base.Resource): """Note attached to a file or folder.""" _url_template = "pubapi/v1/notes/%(id)s" _lazy_attributes = {'file_id', 'file_path', 'can_delete', 'creation_time', 'message', 'username', 'formatted_name'} def delete(self): """Delete this Note""" base.Resource.delete(self) def get_file(self): """Get the file to which this note is attached.""" return self._client.file(self.file_path) class Group(base.Resource): """Group of users.""" _url_template = 
"pubapi/v2/groups/%(id)s" _lazy_attributes = {'displayName', 'members'} def delete(self): """Delete this Group""" base.Resource.delete(self) def full_update(self, displayName, members=None): """ Full update of this group. This endpoint is used to overwrite all of the attributes of a group. This is especially useful for making a change to settings that ensures all prior settings are removed. * displayName: Name of the group (string). Required * members: List of members to be added to the new group (user ids or User objects). Optional. """ url = self._client.get_url(self._url_template, id=self.id) data = dict(displayName=displayName) if members is not None: data['members'] = [dict(value=x.id if isinstance(x, User) else x) for x in members] json = exc.default.check_json_response(self._client.PATCH(url, data)) self._update_attributes(json) class Links(base.HasClient): """Link management API""" _url_template = "pubapi/v1/links" def create(self, path, type, accessibility, recipients=None, send_email=None, message=None, copy_me=None, notify=None, link_to_current=None, expiry_date=None, expiry_clicks=None, add_filename=None, ): """ Create links. * path: The absolute path of the destination file or folder. * type: This determines what type of link will be created ('File' or 'Folder') * accessibility: Determines who a link is accessible by ('Anyone', 'Password', 'Domain', 'Recipients') * send_email: If True, the link will be sent via email by Egnyte. * recipients: List email addresses of recipients of the link. Only required if send_email is True (List of valid email addresses) * message: Personal message to be sent in link email. Only applies if send_email is True (plain text) * copy_me: If True, a copy of the link message will be sent to the link creator. Only applies if send_email is True. * notify: If True, link creator will be notified via email when link is accessed. * link_to_current: If True, link will always refer to current version of file. 
Only applicable for file links. * expiry_date: The expiry date for the link. If expiry_date is specified, expiry_clicks cannot be set (future date as datetime.date or string in YYYY-MM-DD format) * expiry_clicks: The number of clicks the link is valid for. If expiry_clicks is specified, expiry_date cannot be set (value must be between 1 - 10, inclusive) * add_filename: If True then the filename will be appended to the end of the link. Only applies to file links, not folder links. Will return a sequence of created Links, one for each recipient. """ url = self._client.get_url(self._url_template) data = base.filter_none_values(dict(path=path, type=type, accessibility=accessibility, send_email=send_email, copy_me=copy_me, notify=notify, add_filename=add_filename, link_to_current=link_to_current, expiry_clicks=expiry_clicks, expiry_date=base.date_format(expiry_date), recipients=recipients, message=message)) response = exc.default.check_json_response(self._client.POST(url, data)) # This response has weird structure links = response.pop('links') result = [] for l in links: l.update(response) result.append(Link(self._client, **l)) return result def get(self, id): """Get a Link object by it's id""" return Link(self._client, id=id) def list(self, path=None, username=None, created_before=None, created_after=None, type=None, accessibility=None, offset=None, count=None): """ Search links that match following optional conditions: * path: List links to this file or folder (Full absolute path of destination file or folder) * username: List links created by this user (Any username from your Egnyte account) * created_before: List links created before this date (datetime.date, or string in YYYY-MM-DD format) * created_after: List links created after this date (datetime.date, or string in YYYY-MM-DD format) * type: Links of selected type will be shown ('File' or 'Folder') * accessibility: Links of selected accessibility will be shown ('Anyone', 'Password', 'Domain', or 'Recipients') * 
offset: Start at this link, where offset=0 means start with first link. * count: Send this number of links. If not specified, all links will be sent. Returns a list of Link objects, with additional total_count and offset attributes. """ url = self._client.get_url(self._url_template) params = base.filter_none_values(dict(path=path, username=username, created_before=base.date_format(created_before), created_after=base.date_format(created_after), type=type, accessibility=accessibility, offset=offset, count=count)) json = exc.default.check_json_response(self._client.GET(url, params=params)) return base.ResultList((Link(self._client, id=id) for id in json.get('ids', ())), json['total_count'], json['offset']) class Users(base.HasClient): """User management API""" _url_template = "pubapi/v2/users" def list(self, email=None, externalId=None, userName=None, startIndex=None, count=None): """ Search users. Optional search parameters are 'email', 'externalId' and 'userName'. startIndex (starts with 1) and count may be used for pagination Returns a list of User objects, with additional total_count and offset attributes. """ url = self._client.get_url(self._url_template) filters = base.filter_none_values(dict(email=email, externalId=externalId, userName=userName)) params = base.filter_none_values(dict(startIndex=startIndex, count=count)) params['filter'] = [u'%s eq "%s"' % (k, v) for (k, v) in filters.items()] json = exc.default.check_json_response(self._client.GET(url, params=params)) return base.ResultList((User(self._client, **d) for d in json.get('resources', ())), json['totalResults'], json['startIndex'] - 1) def get(self, id): """Get a User object by id. Does not check if User exists.""" return User(self._client, id=id) def by_email(self, email): """Get a User object by email. Returns None if user does not exist""" try: return self.list(email=email)[0] except LookupError: pass def by_username(self, userName): """Get a User object by username. 
Returns None if user does not exist""" try: return self.list(userName=userName)[0] except LookupError: pass def create(self, userName, externalId, email, familyName, givenName, active=True, sendInvite=True, authType='egnyte', userType='power', role=None, idpUserId=None, userPrincipalName=None): """ Create a new user account. Parameters: * userName: The Egnyte username for the user. Username must start with a letter or digit. Special characters are not supported (with the exception of periods, hyphens, and underscores). * externalId: This is an immutable unique identifier provided by the API consumer. Any plain text (e.g. S-1-5-21-3623811015-3361044348-30300820-1013) * email: The email address of the user. Any valid email address (e.g. <EMAIL>) * familyName: The last name of the user. Any plain text (e.g. John) * givenName: The first name of the user. Any plain text (e.g. Smith) * active: Whether the user is active or inactive. True or False * sendInvite: If set to true when creating a user, an invitation email will be sent (if the user is created in active state). True or False * authType: The authentication type for the user. 'ad' (AD), 'sso' (SAML SSO), 'egnyte' (Internal Egnyte) * userType: The type of the user. 'admin' (Administrator), 'power' (Power User), 'standard' (Standard User) * role: The role assigned to the user. Only applicable for Power Users. Default or custom role name * idpUserId: Only required if the user is SSO authenticated and not using default user mapping. Do not specify if user is not SSO authenticated. This is the way the user is identified within the SAML Response from an SSO Identity Provider, i.e. the SAML Subject (e.g. jsmith) * userPrincipalName: Do not specify if user is not AD authenticated. Used to bind child authentication policies to a user when using Active Directory authentication in a multi-domain setup (e.g. <EMAIL>) Returns created User object. 
""" url = self._client.get_url(self._url_template) data = base.filter_none_values(dict(userName=userName, externalId=externalId, email=email, name=dict(familyName=familyName, givenName=givenName), active=active, sendInvite=sendInvite, authType=authType, userType=userType, role=role, idpUserId=idpUserId, userPrincipalName=userPrincipalName)) json = exc.created.check_json_response(self._client.POST(url, data)) return User(self._client, **json) class PermissionSet(object): """Wrapper for a permission set""" def __init__(self, json):
= 1 for media_type, model in dm_person_models.items(): if not media_type == 'Twitter': self.write_primary_row_heading(ws, media_type, c=c+1, r=4) secondary_counts = OrderedDict() country = model.sheet_name() + '__country' for code, answer in YESNO: counts = Counter() rows = model.objects\ .values('sex', country)\ .filter(**{country + '__in': self.country_list})\ .filter(sex__in=self.male_female_ids)\ .filter(is_quoted=code)\ .annotate(n=Count('id')) for row in rows: counts.update({(row['sex'], self.recode_country(row[country])): row['n']}) secondary_counts[answer] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8) c = ws.dim_colmax + 2 def ws_s24(self, ws): """ Cols: Photographed; Sex Rows: Country :: Internet, Twitter """ c = 1 for media_type, model in dm_person_models.items(): self.write_primary_row_heading(ws, media_type, c=c+1, r=4) secondary_counts = OrderedDict() country = model.sheet_name() + '__country' for code, answer in IS_PHOTOGRAPH: counts = Counter() rows = model.objects\ .values('sex', country)\ .filter(**{country + '__in': self.country_list})\ .filter(sex__in=self.male_female_ids)\ .filter(is_photograph=code)\ .annotate(n=Count('id')) for row in rows: counts.update({(row['sex'], self.recode_country(row[country])): row['n']}) secondary_counts[answer] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8) c = ws.dim_colmax + 2 def ws_s25(self, ws): """ Cols: Major topics; Sex Rows: Country :: Internet, Twitter """ c = 1 for media_type, model in dm_journalist_models.items(): self.write_primary_row_heading(ws, media_type, c=c+1, r=4) secondary_counts = OrderedDict() for major_topic, topic_ids in GROUP_TOPICS_MAP.items(): counts = Counter() country = model.sheet_name() + '__country' topic = model.sheet_name() + '__topic' rows = model.objects\ .values('sex', country)\ .filter(**{country + '__in': 
self.country_list})\ .filter(sex__in=self.male_female_ids)\ .filter(**{topic + '__in': topic_ids})\ .annotate(n=Count('id')) if media_type in REPORTER_MEDIA: rows = rows.filter(role=REPORTERS) counts.update({(r['sex'], self.recode_country(r[country])): r['n'] for r in rows}) major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0] secondary_counts[major_topic_name] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.countries, row_perc=True, show_N=True, c=c, r=8) c = ws.dim_colmax + 2 def ws_s26(self, ws): """ Cols: Major topics; Women Central Rows: Country :: Internet, Twitter """ c = 1 for media_type, model in dm_sheet_models.items(): self.write_primary_row_heading(ws, media_type, c=c+1, r=4) counts = Counter() rows = model.objects\ .values('topic', 'country')\ .filter(country__in=self.country_list)\ .filter(about_women='Y')\ .annotate(n=Count('id')) for row in rows: major_topic = TOPIC_GROUPS[row['topic']] counts.update({(major_topic, self.recode_country(row['country'])): row['n']}) self.tabulate(ws, counts, MAJOR_TOPICS, self.countries, raw_values=True, c=c, r=7, write_col_totals=False) c = ws.dim_colmax + 2 def ws_s27(self, ws): """ Cols: Stereotypes Rows: Country :: Internet, Twitter """ c = 1 for media_type, model in dm_sheet_models.items(): self.write_primary_row_heading(ws, media_type, c=c+1, r=4) counts = Counter() rows = model.objects\ .values('stereotypes', 'country')\ .filter(country__in=self.country_list)\ .annotate(n=Count('id')) for row in rows: counts.update({(row['stereotypes'], self.recode_country(row['country'])): row['n']}) self.tabulate(ws, counts, AGREE_DISAGREE, self.countries, raw_values=True, c=c, r=7, write_col_totals=False) c = ws.dim_colmax + 2 def ws_s28(self, ws): """ Cols: Sex of subject Rows: Country """ counts = Counter() for _, model in person_models.items(): sheet_name = model.sheet_name() country_field = f"{sheet_name}__country" rows = model.objects \ .values('sex', 
country_field) \ .filter(**{f"{sheet_name}__covid19": 1}) \ .filter(**{f"{country_field}__in": self.country_list}) \ .filter(sex__in=self.male_female_ids) \ .annotate(n=Count('id')) for row in rows: counts.update({(row['sex'], self.recode_country(row[country_field])): row['n']}) self.tabulate(ws, counts, self.male_female, self.countries, row_perc=True, show_N=True) def ws_s29(self, ws): """ Cols: Sex of reporter Rows: Country """ counts = Counter() for _, model in journalist_models.items(): sheet_name = model.sheet_name() country_field = f"{sheet_name}__country" rows = model.objects \ .values('sex', country_field) \ .filter(**{f"{sheet_name}__covid19": 1}) \ .filter(**{f"{country_field}__in": self.country_list}) \ .filter(sex__in=self.male_female_ids) \ .annotate(n=Count('id')) for row in rows: counts.update({(row['sex'], self.recode_country(row[country_field])): row['n']}) self.tabulate(ws, counts, self.male_female, self.countries, row_perc=True, show_N=True) def ws_sr01(self, ws): """ Cols: Sex of presenters, reporters and subjects Rows: Country :: Newspaper, television, radio by region """ secondary_counts = OrderedDict() presenter_reporter = [('Presenter',[1, 3]), ('Reporter', [2])] for journo_type, role_ids in presenter_reporter: counts = Counter() if journo_type == 'Presenter': journo_models = broadcast_journalist_models elif journo_type == 'Reporter': journo_models = tm_journalist_models for media_type, model in journo_models.items(): region = model.sheet_name() + '__country_region__region' rows = model.objects\ .values('sex', region)\ .filter(**{region + '__in': self.all_region_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) if media_type in REPORTER_MEDIA: # Newspaper journos don't have roles rows = rows.filter(role__in=role_ids) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for row in rows: region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0] counts.update({(row['sex'], region_id): 
row['n']}) secondary_counts[journo_type] = counts counts = Counter() for media_type, model in tm_person_models.items(): region = model.sheet_name() + '__country_region__region' rows = model.objects\ .values('sex', region)\ .filter(**{region + '__in': self.all_region_list})\ .filter(sex__in=self.male_female_ids)\ .annotate(n=Count('id')) rows = self.apply_weights(rows, model.sheet_db_table(), media_type) for row in rows: region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0] counts.update({(row['sex'], region_id): row['n']}) secondary_counts['Subjects'] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.all_regions, row_perc=True, show_N=True) def ws_sr02(self, ws): """ Cols: Major topics; Sex Rows: Country :: Newspaper, television, radio by region """ secondary_counts = OrderedDict() for major_topic, topic_ids in GROUP_TOPICS_MAP.items(): counts = Counter() for media_type, model in tm_sheet_models.items(): region = 'country_region__region' person_sex_field = '%s__sex' % model.person_field_name() rows = model.objects\ .values(person_sex_field, region)\ .filter(**{region + '__in': self.all_region_list})\ .filter(**{person_sex_field + '__in': self.male_female_ids})\ .filter(topic__in=topic_ids)\ .annotate(n=Count('id')) rows = self.apply_weights(rows, model._meta.db_table, media_type) for row in rows: region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0] counts.update({(row["sex"], region_id): row['n']}) major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0] secondary_counts[major_topic_name] = counts self.tabulate_secondary_cols(ws, secondary_counts, self.male_female, self.all_regions, row_perc=True, show_N=True) def ws_sr03(self, ws): """ Cols: Function; Sex Rows: Country :: Newspaper, television, radio by region """ secondary_counts = OrderedDict() for function_id, function in FUNCTION: counts = Counter() for media_type, model in tm_person_models.items(): region = 
# NOTE(review): this chunk starts mid-statement — the lines below are the tail
# of the preceding ws_sr03-style worksheet method (its head lies outside this
# chunk). Tokens are preserved verbatim; indentation is inferred from the
# structurally identical sibling methods below (ws_sr06 has the same shape).
model.sheet_name() + '__country_region__region'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in': self.all_region_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .filter(function=function_id)\
                    .annotate(n=Count('id'))
                # Re-weight raw counts per sheet table / media type (project helper).
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    # Map region name back to its id via linear scan of (id, name) pairs.
                    region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0]
                    counts.update({(row['sex'], region_id): row['n']})
            secondary_counts[clean_title(function)] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female,
                                     self.all_regions, row_perc=True, show_N=True)

    def ws_sr04(self, ws):
        """
        Cols: Photographed; Sex
        Rows: Country

        :: Newspaper only, by region
        """
        secondary_counts = OrderedDict()
        # Print (newspaper) is the only medium with an is_photograph field.
        model = person_models.get('Print')
        for code, answer in IS_PHOTOGRAPH:
            counts = Counter()
            region = model.sheet_name() + '__country_region__region'
            rows = model.objects\
                .values('sex', region)\
                .filter(**{region + '__in': self.all_region_list})\
                .filter(sex__in=self.male_female_ids)\
                .filter(is_photograph=code)\
                .annotate(n=Count('id'))
            rows = self.apply_weights(rows, model.sheet_db_table(), 'Print')
            for row in rows:
                # Map region name back to its id via linear scan of (id, name) pairs.
                region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0]
                counts.update({(row['sex'], region_id): row['n']})
            # One secondary column group per IS_PHOTOGRAPH answer.
            secondary_counts[answer] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female,
                                     self.all_regions, row_perc=True, show_N=True)

    def ws_sr05(self, ws):
        """
        Cols: Media; Journo Type; Sex
        Rows: Country

        :: Newspaper, television, radio by region
        """
        # c/r track the worksheet column/row origin of each media-type band;
        # row headings are written only once, with the first band.
        c = 1
        r = 8
        write_row_headings = True
        for media_type, model in journalist_models.items():
            if media_type in broadcast_journalist_models:
                # Broadcast roles: 1 and 3 count as presenters, 2 as reporter.
                presenter_reporter = [('Presenter', [1, 3]), ('Reporter', [2])]
            else:
                # Newspaper journos don't have roles
                presenter_reporter = [('Reporter', [])]
            col = c + (1 if write_row_headings else 0)
            # Each (journo type x sex) pair takes two columns (N and %).
            merge_range = (len(presenter_reporter) * len(self.male_female) * 2) - 1
            ws.merge_range(r-4, col, r-4, col + merge_range,
                           clean_title(media_type), self.col_heading)
            secondary_counts = OrderedDict()
            for journo_type, role_ids in presenter_reporter:
                counts = Counter()
                region = model.sheet_name() + '__country_region__region'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in': self.all_region_list})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    # Newspaper journos don't have roles
                    rows = rows.filter(role__in=role_ids)
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    # 'reg' (not 'r') to avoid shadowing the worksheet row index above.
                    region_id = [reg[0] for reg in self.all_regions if reg[1] == row["region"]][0]
                    counts.update({(row['sex'], region_id): row['n']})
                secondary_counts[journo_type] = counts
            self.tabulate_secondary_cols(ws, secondary_counts, self.male_female,
                                         self.all_regions, row_perc=True, show_N=True,
                                         c=c, r=r, write_row_headings=write_row_headings)
            # Advance to the next band; the headings column is consumed only once.
            c += (len(presenter_reporter) * len(self.male_female) * 2) + (1 if write_row_headings else 0)
            write_row_headings = False

    def ws_sr06(self, ws):
        """
        Cols: Major topics; Sex
        Rows: Country

        :: Newspaper, television, radio by region
        """
        secondary_counts = OrderedDict()
        for major_topic, topic_ids in GROUP_TOPICS_MAP.items():
            counts = Counter()
            # Aggregate across all media sheet models for this topic group.
            for media_type, model in tm_sheet_models.items():
                region = 'country_region__region'
                journo_sex_field = '%s__sex' % model.journalist_field_name()
                journo_role_field = '%s__role' % model.journalist_field_name()
                rows = model.objects\
                    .values(journo_sex_field, region)\
                    .filter(**{region + '__in': self.all_region_list})\
                    .filter(**{journo_sex_field + '__in': self.male_female_ids})\
                    .filter(topic__in=topic_ids)\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    # Newspaper journos don't have roles
                    rows = rows.filter(**{journo_role_field: REPORTERS})
                rows = self.apply_weights(rows, model._meta.db_table, media_type)
                for row in rows:
                    region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0]
                    # NOTE(review): the queryset keys this value as journo_sex_field
                    # (e.g. 'journalist__sex'), yet 'sex' is read here — presumably
                    # apply_weights normalizes the key names; confirm against its
                    # implementation.
                    counts.update({(row["sex"], region_id): row['n']})
            major_topic_name = [mt[1] for mt in MAJOR_TOPICS if mt[0] == int(major_topic)][0]
            secondary_counts[major_topic_name] = counts
        self.tabulate_secondary_cols(ws, secondary_counts, self.male_female,
                                     self.all_regions, row_perc=True, show_N=True)

    def ws_sr07(self, ws):
        """
        Cols: Journalist Sex, Subject Sex
        Rows: Country

        :: Newspaper, television, radio by region
        """
        secondary_counts = OrderedDict()
        for sex_id, sex in self.male_female:
            counts = Counter()
            for media_type, model in tm_person_models.items():
                sheet_name = model.sheet_name()
                # Follow the person -> sheet -> journalist relation to filter by journalist sex.
                journo_name = model._meta.get_field(model.sheet_name()).remote_field.model.journalist_field_name()
                region = model.sheet_name() + '__country_region__region'
                rows = model.objects\
                    .values('sex', region)\
                    .filter(**{region + '__in': self.all_region_list})\
                    .filter(**{sheet_name + '__' + journo_name + '__sex': sex_id})\
                    .filter(sex__in=self.male_female_ids)\
                    .annotate(n=Count('id'))
                if media_type in REPORTER_MEDIA:
                    rows = rows.filter(**{sheet_name + '__' + journo_name + '__role': REPORTERS})
                rows = self.apply_weights(rows, model.sheet_db_table(), media_type)
                for row in rows:
                    region_id = [r[0] for r in self.all_regions if r[1] == row["region"]][0]
                    # NOTE(review): the SOURCE chunk is truncated mid-expression on the
                    # next line — the remainder of ws_sr07 is not present in this chunk.
                    counts.update({(row['sex'], region_id):
[3, 's66'], # [3, 's67'], # [3, 's68'], # [3, 's69'], # [3, 's60'], # [3, 's71'], # [3, 's72'], # [3, 's73'], # [3, 's74'], # [3, 's75'], # [3, 's76'], # [3, 's77'], # [3, 's78'], # [3, 's79'], # [3, 's70'], ['s61', 's62'], ['s61', 's63'], ['s62', 's63'], ] sybil_edges5 = [ [6, 's81'], [6, 's82'], [6, 's83'], # [6, 's84'], # [6, 's85'], # [6, 's86'], # [6, 's87'], # [6, 's88'], # [6, 's89'], # [6, 's80'], # [6, 's91'], # [6, 's92'], # [6, 's93'], # [6, 's94'], # [6, 's95'], # [6, 's96'], # [6, 's97'], # [6, 's98'], # [6, 's99'], # [6, 's90'], [7, 's81'], [7, 's82'], [7, 's83'], # [7, 's84'], # [7, 's85'], # [7, 's86'], # [7, 's87'], # [7, 's88'], # [7, 's89'], # [7, 's80'], # [7, 's91'], # [7, 's92'], # [7, 's93'], # [7, 's94'], # [7, 's95'], # [7, 's96'], # [7, 's97'], # [7, 's98'], # [7, 's99'], # [7, 's90'], [1, 's81'], [1, 's82'], [1, 's83'], # [1, 's84'], # [1, 's85'], # [1, 's86'], # [1, 's87'], # [1, 's88'], # [1, 's89'], # [1, 's80'], # [1, 's91'], # [1, 's92'], # [1, 's93'], # [1, 's94'], # [1, 's95'], # [1, 's96'], # [1, 's97'], # [1, 's98'], # [1, 's99'], # [1, 's90'], # [2, 's81'], # [2, 's82'], # [2, 's83'], # [2, 's84'], # [2, 's85'], # [2, 's86'], # [2, 's87'], # [2, 's88'], # [2, 's89'], # [2, 's80'], # [2, 's91'], # [2, 's92'], # [2, 's93'], # [2, 's94'], # [2, 's95'], # [2, 's96'], # [2, 's97'], # [2, 's98'], # [2, 's99'], # [2, 's90'], # [3, 's81'], # [3, 's82'], # [3, 's83'], # [3, 's84'], # [3, 's85'], # [3, 's86'], # [3, 's87'], # [3, 's88'], # [3, 's89'], # [3, 's80'], # [3, 's91'], # [3, 's92'], # [3, 's93'], # [3, 's94'], # [3, 's95'], # [3, 's96'], # [3, 's97'], # [3, 's98'], # [3, 's99'], # [3, 's90'], ['s81', 's82'], ['s81', 's83'], ['s82', 's83'], ] sybil_edges6 = [ [6, 's101'], [6, 's102'], [6, 's103'], # [6, 's104'], # [6, 's105'], # [6, 's106'], # [6, 's107'], # [6, 's108'], # [6, 's109'], # [6, 's100'], # [6, 's111'], # [6, 's112'], # [6, 's113'], # [6, 's114'], # [6, 's115'], # [6, 's116'], # [6, 's117'], # [6, 's118'], # [6, 
's119'], # [6, 's110'], [7, 's101'], [7, 's102'], [7, 's103'], # [7, 's104'], # [7, 's105'], # [7, 's106'], # [7, 's107'], # [7, 's108'], # [7, 's109'], # [7, 's100'], # [7, 's111'], # [7, 's112'], # [7, 's113'], # [7, 's114'], # [7, 's115'], # [7, 's116'], # [7, 's117'], # [7, 's118'], # [7, 's119'], # [7, 's110'], [1, 's101'], [1, 's102'], [1, 's103'], # [1, 's104'], # [1, 's105'], # [1, 's106'], # [1, 's107'], # [1, 's108'], # [1, 's109'], # [1, 's100'], # [1, 's111'], # [1, 's112'], # [1, 's113'], # [1, 's114'], # [1, 's115'], # [1, 's116'], # [1, 's117'], # [1, 's118'], # [1, 's119'], # [1, 's110'], # [2, 's101'], # [2, 's102'], # [2, 's103'], # [2, 's104'], # [2, 's105'], # [2, 's106'], # [2, 's107'], # [2, 's108'], # [2, 's109'], # [2, 's100'], # [2, 's111'], # [2, 's112'], # [2, 's113'], # [2, 's114'], # [2, 's115'], # [2, 's116'], # [2, 's117'], # [2, 's118'], # [2, 's119'], # [2, 's110'], # [3, 's101'], # [3, 's102'], # [3, 's103'], # [3, 's104'], # [3, 's105'], # [3, 's106'], # [3, 's107'], # [3, 's108'], # [3, 's109'], # [3, 's100'], # [3, 's111'], # [3, 's112'], # [3, 's113'], # [3, 's114'], # [3, 's115'], # [3, 's116'], # [3, 's117'], # [3, 's118'], # [3, 's119'], # [3, 's110'], ['s101', 's102'], ['s101', 's103'], ['s102', 's103'], # ['s101', 's104'], # ['s101', 's105'], # ['s102', 's104'], # ['s102', 's105'], # ['s103', 's104'], # ['s103', 's105'], # ['s104', 's105'], # ['s106', 's101'], # ['s106', 's102'], # ['s106', 's103'], # ['s106', 's104'], # ['s106', 's105'], # ['s107', 's101'], # ['s107', 's102'], # ['s107', 's103'], # ['s107', 's104'], # ['s107', 's105'], # ['s107', 's106'], ] sybil_edges7 = [ [6, 's121'], [6, 's122'], [6, 's123'], # [6, 's124'], # [6, 's125'], # [6, 's126'], # [6, 's127'], # [6, 's128'], # [6, 's129'], # [6, 's120'], # [6, 's131'], # [6, 's132'], # [6, 's133'], # [6, 's134'], # [6, 's135'], # [6, 's136'], # [6, 's137'], # [6, 's138'], # [6, 's139'], # [6, 's130'], [7, 's121'], [7, 's122'], [7, 's123'], # [7, 's124'], # [7, 
's125'], # [7, 's126'], # [7, 's127'], # [7, 's128'], # [7, 's129'], # [7, 's120'], # [7, 's131'], # [7, 's132'], # [7, 's133'], # [7, 's134'], # [7, 's135'], # [7, 's136'], # [7, 's137'], # [7, 's138'], # [7, 's139'], # [7, 's130'], [1, 's121'], [1, 's122'], [1, 's123'], # [1, 's124'], # [1, 's125'], # [1, 's126'], # [1, 's127'], # [1, 's128'], # [1, 's129'], # [1, 's120'], # [1, 's131'], # [1, 's132'], # [1, 's133'], # [1, 's134'], # [1, 's135'], # [1, 's136'], # [1, 's137'], # [1, 's138'], # [1, 's139'], # [1, 's130'], # [2, 's121'], # [2, 's122'], # [2, 's123'], # [2, 's124'], # [2, 's125'], # [2, 's126'], # [2, 's127'], # [2, 's128'], # [2, 's129'], # [2, 's120'], # [2, 's131'], # [2, 's132'], # [2, 's133'], # [2, 's134'], # [2, 's135'], # [2, 's136'], # [2, 's137'], # [2, 's138'], # [2, 's139'], # [2, 's130'], # [3, 's121'], # [3, 's122'], # [3, 's123'], # [3, 's124'], # [3, 's125'], # [3, 's126'], # [3, 's127'], # [3, 's128'], # [3, 's129'], # [3, 's120'], # [3, 's131'], # [3, 's132'], # [3, 's133'], # [3, 's134'], # [3, 's135'], # [3, 's136'], # [3, 's137'], # [3, 's138'], # [3, 's139'], # [3, 's130'], ['s121', 's122'], ['s121', 's123'], ['s122', 's123'], ] sybil_edges8 = [ [6, 's141'], [6, 's142'], [6, 's143'], # [6, 's144'], # [6, 's145'], # [6, 's146'], # [6, 's147'], # [6, 's148'], # [6, 's149'], # [6, 's140'], # [6, 's151'], # [6, 's152'], # [6, 's153'], # [6, 's154'], # [6, 's155'], # [6, 's156'], # [6, 's157'], # [6, 's158'], # [6, 's159'], # [6, 's150'], [7, 's141'], [7, 's142'], [7, 's143'], # [7, 's144'], # [7, 's145'], # [7, 's146'], # [7, 's147'], # [7, 's148'], # [7, 's149'], # [7, 's140'], # [7, 's151'], # [7, 's152'], # [7, 's153'], # [7, 's154'], # [7, 's155'], # [7, 's156'], # [7, 's157'], # [7, 's158'], # [7, 's159'], # [7, 's150'], [1, 's141'], [1, 's142'], [1, 's143'], # [1, 's144'], # [1, 's145'], # [1, 's146'], # [1, 's147'], # [1, 's148'], # [1, 's149'], # [1, 's140'], # [1, 's151'], # [1, 's152'], # [1, 's153'], # [1, 's154'], # [1, 
's155'], # [1, 's156'], # [1, 's157'], # [1, 's158'], # [1, 's159'], # [1, 's150'], # [2, 's141'], # [2, 's142'], # [2, 's143'], # [2, 's144'], # [2, 's145'], # [2, 's146'], # [2, 's147'], # [2, 's148'], # [2, 's149'], # [2, 's140'], # [2, 's151'], # [2, 's152'], # [2, 's153'], # [2, 's154'], # [2, 's155'], # [2, 's156'], # [2, 's157'], # [2, 's158'], # [2, 's159'], # [2, 's150'], # [3, 's141'], # [3, 's142'], # [3, 's143'], # [3, 's144'], # [3, 's145'], # [3, 's146'], # [3, 's147'], # [3, 's148'], # [3, 's149'], # [3, 's140'], # [3, 's151'], # [3, 's152'], # [3, 's153'], # [3, 's154'], # [3, 's155'], #
progress percent on failure and final 'success' status self.response._update_status(pywps_status_id, message, self.percent) # noqa: W0212 self.log_message(status=status, message=message, progress=progress) def step_update_status(self, message, progress, start_step_progress, end_step_progress, step_name, target_host, status): # type: (str, Number, Number, Number, str, AnyValueType, str) -> None self.update_status( message="{0} [{1}] - {2}".format(target_host, step_name, str(message).strip()), progress=map_progress(progress, start_step_progress, end_step_progress), status=status, ) def log_message(self, status, message, progress=None, level=logging.INFO): # type: (AnyStatusType, str, Optional[Number], int) -> None progress = progress if progress is not None else self.percent message = get_job_log_msg(status=map_status(status), message=message, progress=progress) self.logger.log(level, message, exc_info=level > logging.INFO) def exception_message(self, exception_type, exception=None, message="no message", status=STATUS_EXCEPTION, level=logging.ERROR): # type: (Type[Exception], Optional[Exception], str, AnyStatusType, int) -> Exception """ Logs to the job the specified error message with the provided exception type. :returns: formatted exception with message to be raised by calling function. """ exception_msg = " [{}]".format(repr(exception)) if isinstance(exception, Exception) else "" self.log_message(status=status, level=level, message="{0}: {1}{2}".format(exception_type.__name__, message, exception_msg)) return exception_type("{0}{1}".format(message, exception_msg)) @property def job(self): # type: () -> Job """ Obtain the job associated to this package execution as specified by the provided UUID. Process must be in "execute" state under :mod:`pywps` for this job to be available. 
""" if self._job is None: store = get_db(self.settings).get_store(StoreJobs) self._job = store.fetch_by_id(self.uuid) return self._job @classmethod def map_step_progress(cls, step_index, steps_total): # type: (int, int) -> Number """ Calculates the percentage progression of a single step of the full process. .. note:: The step procession is adjusted according to delimited start/end of the underlying `CWL` execution to provide a continuous progress percentage over the complete execution. Otherwise, we would have values that jump around according to whichever progress the underlying remote `WPS` or monitored `CWL` employs, if any is provided. """ return map_progress(100 * step_index / steps_total, PACKAGE_PROGRESS_CWL_RUN, PACKAGE_PROGRESS_CWL_DONE) def _handler(self, request, response): # type: (WPSRequest, ExecuteResponse) -> ExecuteResponse """ Method called when process receives the WPS execution request. """ # pylint: disable=R1260,too-complex # FIXME # note: only 'LOGGER' call allowed here, since 'setup_loggers' not called yet LOGGER.debug("HOME=%s, Current Dir=%s", os.environ.get("HOME"), os.path.abspath(os.curdir)) self.request = request self.response = response self.package_id = self.request.identifier try: # prepare some metadata about the package that are often reused self.package_type = _get_package_type(self.package) self.package_requirement = get_application_requirement(self.package) try: # workflows do not support stdout/stderr log_stdout_stderr = self.package_type != PROCESS_WORKFLOW self.setup_loggers(log_stdout_stderr) self.update_status("Preparing package logs done.", PACKAGE_PROGRESS_PREP_LOG, STATUS_RUNNING) except Exception as exc: raise self.exception_message(PackageExecutionError, exc, "Failed preparing package logging.") self.update_status("Launching package...", PACKAGE_PROGRESS_LAUNCHING, STATUS_RUNNING) # early validation to ensure proper instance is defined for target process/package # Note: # This is only to ensure we stop execution in 
case some process was deployed somehow with mandatory # remote execution, but cannot accomplish it due to mismatching configuration. This can occur if # configuration was modified and followed by Weaver reboot with persisted WPS-remote process. config = get_weaver_configuration(self.settings) self.remote_execution = config in WEAVER_CONFIGURATIONS_REMOTE problem_needs_remote = check_package_instance_compatible(self.package) if not self.remote_execution: if problem_needs_remote: raise self.exception_message( PackageExecutionError, message="Weaver instance is configured as [{}] but remote execution with one of {} is " "required for process [{}] because {}. Aborting execution.".format( config, list(WEAVER_CONFIGURATIONS_REMOTE), self.package_id, problem_needs_remote ) ) # switch back to local execution if hybrid execution can handle this package by itself (eg: Docker, builtin) elif config == WEAVER_CONFIGURATION_HYBRID: self.remote_execution = problem_needs_remote is not None if self.remote_execution: # EMS/Hybrid dispatch the execution to ADES or remote WPS loading_context = LoadingContext() loading_context.construct_tool_object = self.make_tool else: # ADES/Hybrid execute the CWL/AppPackage locally loading_context = None self.update_effective_user() self.update_requirements() runtime_params = self.setup_runtime() self.logger.debug("Using cwltool.RuntimeContext args:\n%s", json.dumps(runtime_params, indent=2)) runtime_context = RuntimeContext(kwargs=runtime_params) try: package_inst, _, self.step_packages = _load_package_content(self.package, package_name=self.package_id, # no data source for local package data_source=None, loading_context=loading_context, runtime_context=runtime_context) self.step_launched = [] except Exception as ex: raise PackageRegistrationError("Exception occurred on package instantiation: '{!r}'".format(ex)) self.update_status("Loading package content done.", PACKAGE_PROGRESS_LOADING, STATUS_RUNNING) try: cwl_inputs_info = {i["name"]: i for i 
in package_inst.t.inputs_record_schema["fields"]} self.update_status("Retrieve package inputs done.", PACKAGE_PROGRESS_GET_INPUT, STATUS_RUNNING) except Exception as exc: raise self.exception_message(PackageExecutionError, exc, "Failed retrieving package input types.") try: # identify EOImages from payload request.inputs = opensearch.get_original_collection_id(self.payload, request.inputs) eoimage_data_sources = opensearch.get_eo_images_data_sources(self.payload, request.inputs) if eoimage_data_sources: self.update_status("Found EOImage data-source definitions. " "Updating inputs with OpenSearch sources.", PACKAGE_PROGRESS_ADD_EO_IMAGES, STATUS_RUNNING) accept_mime_types = opensearch.get_eo_images_mime_types(self.payload) opensearch.insert_max_occurs(self.payload, request.inputs) request.inputs = opensearch.query_eo_images_from_wps_inputs(request.inputs, eoimage_data_sources, accept_mime_types, settings=self.settings) cwl_inputs = self.make_inputs(request.inputs, cwl_inputs_info) self.update_status("Convert package inputs done.", PACKAGE_PROGRESS_CONVERT_INPUT, STATUS_RUNNING) except PackageException as exc: raise self.exception_message(type(exc), None, str(exc)) # re-raise as is, but with extra log entry except Exception as exc: raise self.exception_message(PackageExecutionError, exc, "Failed to load package inputs.") try: self.update_status("Checking package prerequisites... 
" "(operation could take a while depending on requirements)", PACKAGE_PROGRESS_PREPARATION, STATUS_RUNNING) setup_status = self.setup_docker_image() if setup_status not in [None, True]: raise PackageAuthenticationError self.update_status("Package ready for execution.", PACKAGE_PROGRESS_PREPARATION, STATUS_RUNNING) except Exception: # noqa: W0703 # nosec: B110 # don't pass exception to below message raise self.exception_message(PackageAuthenticationError, None, "Failed Docker image preparation.") try: self.update_status("Running package...", PACKAGE_PROGRESS_CWL_RUN, STATUS_RUNNING) self.logger.debug("Launching process package with inputs:\n%s", json.dumps(cwl_inputs, indent=2)) result = package_inst(**cwl_inputs) # type: CWL_Results self.update_status("Package execution done.", PACKAGE_PROGRESS_CWL_DONE, STATUS_RUNNING) except Exception as exc: if isinstance(exc, CWLException): lines = self.insert_package_log(exc) LOGGER.debug("Captured logs:\n%s", "\n".join(lines)) raise self.exception_message(PackageExecutionError, exc, "Failed package execution.") # FIXME: this won't be necessary using async routine (https://github.com/crim-ca/weaver/issues/131) self.insert_package_log(result) try: self.make_outputs(result) self.update_status("Generate package outputs done.", PACKAGE_PROGRESS_PREP_OUT, STATUS_RUNNING) except Exception as exc: raise self.exception_message(PackageExecutionError, exc, "Failed to save package outputs.") except Exception: # return log file location by status message since outputs are not obtained by WPS failed process log_url = "{}/{}.log".format(get_wps_output_url(self.settings), self.uuid) error_msg = "Package completed with errors. 
Server logs: [{}], Available at [{}]:".format( self.log_file, log_url ) self.update_status(error_msg, self.percent, STATUS_FAILED) raise else: self.update_status("Package complete.", PACKAGE_PROGRESS_DONE, STATUS_SUCCEEDED) return self.response def must_fetch(self, input_ref): # type: (str) -> bool """ Figures out if file reference should be fetched immediately for local execution. If anything else than local script/docker, remote ADES/WPS process will fetch it. S3 are handled here to avoid error on remote WPS not supporting it. .. seealso:: - :ref:`File Reference Types` """ if self.remote_execution or self.package_type == PROCESS_WORKFLOW: return False app_req = get_application_requirement(self.package) if app_req["class"] in CWL_REQUIREMENT_APP_REMOTE: if input_ref.startswith("s3://"): return True return False return not os.path.isfile(input_ref) def make_inputs(self, wps_inputs, # type: Dict[str, Deque[WPS_Input_Type]] cwl_inputs_info, # type: Dict[str, CWL_Input_Type] ): # type: (...) -> Dict[str, ValueType] """ Converts WPS input values to corresponding CWL input values for processing by CWL package instance. The WPS inputs must correspond to :mod:`pywps` definitions. Multiple values are adapted to arrays as needed. WPS ``Complex`` types (files) are converted to appropriate locations based on data or reference specification. :param wps_inputs: actual WPS inputs parsed from execution request :param cwl_inputs_info: expected CWL input definitions for mapping :return: CWL input values """ cwl_inputs = dict() for input_id in wps_inputs: # skip empty inputs (if that is even possible...) 
input_occurs = wps_inputs[input_id] if len(input_occurs) <= 0: continue # process single occurrences input_i = input_occurs[0] # handle as reference/data is_array, elem_type, _, _ = is_cwl_array_type(cwl_inputs_info[input_id]) if isinstance(input_i, ComplexInput) or elem_type == "File": # extend array data that allow max_occur > 1 # drop invalid inputs returned as None if is_array: input_href = [self.make_location_input(elem_type, input_def) for input_def in input_occurs] input_href = [cwl_input for cwl_input in input_href if cwl_input is not None] else: input_href = self.make_location_input(elem_type, input_i) if input_href: cwl_inputs[input_id] = input_href elif isinstance(input_i, (LiteralInput, BoundingBoxInput)): # extend array data that allow max_occur > 1 if is_array: input_data = [i.url if i.as_reference else i.data for i in input_occurs] else: input_data = input_i.url if input_i.as_reference else input_i.data cwl_inputs[input_id] = input_data else: raise PackageTypeError("Undefined package input for execution: {}.".format(type(input_i))) return cwl_inputs def make_location_input(self, input_type, input_definition): # type: (str, ComplexInput) -> Optional[JSON] """ Generates the JSON content required to specify a `CWL` ``File`` input definition from a location. If the input reference corresponds to an HTTP URL that is detected as matching the local WPS output endpoint, implicitly convert the reference to the local WPS output directory to avoid useless download of available file. Since that endpoint could be protected though, perform a minimal HEAD request to validate its accessibility. Otherwise, this operation could incorrectly grant unauthorized access to protected files by forging the URL. If the process
<gh_stars>0 from __future__ import print_function import sys import os import re import math import argparse import webbrowser import random import copy if sys.version_info < (3,): import ConfigParser as configparser import StringIO from urllib2 import urlopen as urlopen, HTTPError from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler else: import configparser import io from urllib.request import urlopen as urlopen from urllib.error import HTTPError from http.server import HTTPServer, BaseHTTPRequestHandler try: import sqlite3 sqlite3_available = True except: sqlite3_available = False try: import xml.etree.cElementTree as ET except: import xml.etree.ElementTree as ET from PIL import Image, ImageDraw from time import time, sleep, strftime, gmtime IDENTITY = """\ kahelo - tile management for GPS maps - kahelo.godrago.net\ """ VERSION = '1.10' LICENSE = """\ Copyright (c) 2014-2022 <NAME> (gilles dot arcas at gmail dot com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # -- Constants --------------------------------------------------------------- APPNAME = 'kahelo' MAXZOOM = 18 # -- Command line parsing ---------------------------------------------------- USAGE = """ -describe <db name> [-db_format <db format] [-tile_format <tile format>] [-url_template <url template>] -insert <db name> <tileset> [-force] -import <db name> <tileset> [-force] -source <db name> -export <db name> <tileset> [-force] -dest <db name> -delete <db name> <tileset> -view <db name> <tileset> [-image <image name>] -count <db name> <tileset> -stat <db name> <tileset> -server <db name> tileset: -track <track_filename> -zoom <zoom_level> [-radius <in kilometers>] -tracks <track_filename> -zoom <zoom_level> [-radius <in kilometers>] -contour <track_filename> -zoom <zoom_level> [-radius <in kilometers>] -contours <track_filename> -zoom <zoom_level> [-radius <in kilometers>] -disk <point coordinates> -project <project_filename> -records [-zoom <zoom_level>] -tiles xmin,ymin,xmax,ymax -zoom <zoom_level> -inside limits tilesets to the intersection with the argument database -zoom 1-14,16/12 zoom levels 1 to 14 and 16, level 12 subdivised into higher levels -radius n or n,n,..., if multiple values, must be one value per zoom interval or value url template examples: OpenStreetMap: http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png may be abbreviated as OpenStreetMap MapQuest: http://otile[1234].mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.jpg may be abbreviated as MapQuest full help: APPNAME.html\ """ class ArgumentParser(argparse.ArgumentParser): def __init__(self): usage = USAGE.replace('APPNAME', APPNAME) argparse.ArgumentParser.__init__(self, usage=usage, add_help=False) group = self.add_argument_group('Information') group.add_argument('-version', action='store_true', help='print version number', dest='do_version') group.add_argument('-license', action='store_true', help='display text of license', dest='do_license') group.add_argument('-help', action='store_true', 
help='show this help message', dest='do_help') group.add_argument('-Help', action='store_true', help='open html help page', dest='do_helphtml') group.add_argument('-verbose', action='store', help='detailed feedback', dest='verbose', nargs='?', const=3, default=None) group.add_argument('-quiet', action='store_true', help='minimal feedback', dest='quiet') agroup = self.add_argument_group('Commands') xgroup = agroup.add_mutually_exclusive_group() xgroup.add_argument('-describe', metavar='db_name', action='store', dest='db_describe', help='set database properties') xgroup.add_argument('-insert', metavar='db_name', action='store', dest='db_insert', help='download and insert tiles in database') xgroup.add_argument('-import', metavar='db_name', action='store', dest='db_import', help='import tiles') xgroup.add_argument('-export', metavar='db_name', action='store', dest='db_export', help='export tiles') xgroup.add_argument('-delete', metavar='db_name', action='store', dest='db_delete', help='delete tiles') xgroup.add_argument('-count', metavar='db_name', action='store', dest='db_count' , help='count tiles') xgroup.add_argument('-view', metavar='db_name', action='store', dest='db_view' , help='make an image from tiles') xgroup.add_argument('-server', metavar='db_name', action='store', dest='db_server', help='connect to dabase through http') xgroup.add_argument('-stat', metavar='db_name', action='store', dest='db_stat' , help='statistics') agroup = self.add_argument_group('Database properties') if sqlite3_available: db_ids = ('maverick', 'folder', 'rmaps', 'kahelo') else: db_ids = ('maverick', 'folder') img_ids = ('png', 'jpg', 'server') agroup.add_argument('-db_format' , action='store', dest='db_format', choices=db_ids) agroup.add_argument('-tile_format' , action='store', dest='tile_format', choices=img_ids) agroup.add_argument('-url_template', action='store', dest='url_template', help='url template for tile server') agroup = self.add_argument_group('Tile database source and 
destination') agroup.add_argument('-source' , metavar='db_name', action='store', dest='db_source', help='source database') agroup.add_argument('-destination', metavar='db_name', action='store', dest='db_dest' , help='destination database') agroup = self.add_argument_group('Tile source') xgroup = agroup.add_mutually_exclusive_group() xgroup.add_argument('-track' , action='store', dest='track', help='track filename') xgroup.add_argument('-tracks' , action='store', dest='tracks', help='track filename') xgroup.add_argument('-contour' , action='store', dest='contour', help='contour filename') xgroup.add_argument('-contours', action='store', dest='contours', help='contour filename') xgroup.add_argument('-disk' , action='store', dest='disk', help='point coordinates') xgroup.add_argument('-project' , action='store', dest='project', help='project filename') xgroup.add_argument('-records' , action='store_true', dest='db_tiles', help='tiles from database') xgroup.add_argument('-tiles' , action='store', dest='coord_tiles', help='tile coordinates') agroup.add_argument('-zoom' , action='store', dest='zoom', help='zoom 0-%d' % MAXZOOM) agroup.add_argument('-radius' , action='store', dest='radius', help='include disk radius in km') agroup.add_argument('-inside' , action='store_true', dest='inside', help='limit tilesets to intersection with database') agroup = self.add_argument_group('Other parameters') agroup.add_argument('-force' , action='store_true', dest='force_insert', help='force insertion into database') agroup.add_argument('-image' , action='store', dest='image', help='name of output image') def error(self, message): error(message) def parse_args(self, argstring=None): if argstring is None: options = argparse.ArgumentParser.parse_args(self) else: options = argparse.ArgumentParser.parse_args(self, argstring.split()) # nothing more to do if help or version if options.do_version or options.do_license or options.do_help or options.do_helphtml: return options # upper case 
constant argument values if options.db_format is not None: options.db_format = options.db_format.upper() if options.tile_format is not None: options.tile_format = options.tile_format.upper() # verbosity if options.quiet: options.verbosity = 0 elif not options.verbose: options.verbosity = 1 else: options.verbosity = int(options.verbose) # add db_name attribute options.db_name = (options.db_describe or options.db_count or options.db_insert or options.db_import or options.db_export or options.db_delete or options.db_view or options.db_stat or options.db_server or None) # expand url aliases if options.url_template == 'OpenStreetMap': options.url_template = r'http://[abc].tile.openstreetmap.org/{z}/{x}/{y}.png' if options.url_template == 'MapQuest': options.url_template = r'http://otile[1234].mqcdn.com/tiles/1.0.0/osm/{z}/{x}/{y}.jpg' # nothing more to do for -describe or -server if options.db_describe or options.db_server: return options complete_source(options) return options def complete_source(options): # set tile generator and tile origin if options.track: options.tile_generator, options.tile_source = tile_track_generator, options.track elif options.tracks: options.tile_generator, options.tile_source = tile_tracks_generator, options.tracks elif options.contour: options.tile_generator, options.tile_source = tile_contour_generator, options.contour elif options.contours: options.tile_generator, options.tile_source = tile_contours_generator, options.contours elif options.disk: options.tile_generator, options.tile_source = tile_disk_generator, options.disk elif options.project: options.tile_generator, options.tile_source = tile_project_generator, options.project elif options.db_tiles: options.tile_generator, options.tile_source = db_tiles_generator, None elif options.coord_tiles: options.tile_generator, options.tile_source = coord_tiles_generator, options.coord_tiles else: error('source is missing ') # replace tile coordinate string with integer coordinates if 
options.coord_tiles: try: options.coord_tiles = [int(x) for x in options.coord_tiles.split(',')] except: error('incorrect tile rectangle coordinates (xmin,ymin,xmax,ymax)') # replace zoom string with list of zoom values zoom_arg = options.zoom if options.zoom is None: if options.project: options.zoom = list(range(MAXZOOM + 1)) elif options.db_tiles: options.zoom = list(range(MAXZOOM + 1)) else: error('zoom must be given') else: options.zoom, options.zoom_limit = decode_range_ex(options.zoom) if options.zoom is None or not all(0 <= n <= MAXZOOM for n in options.zoom): error('zoom values must be integers between 0 and %d' % MAXZOOM) if (options.zoom_limit is None or not (0 <= options.zoom_limit <= MAXZOOM or options.zoom_limit == 1000)): error('zoom limit must be an integer between 0 and %d' % MAXZOOM) # replace radius argument with float value or list of float values (if multiple radius) if options.radius is None: pass else: options.radius = decode_radius(options.radius, zoom_arg) # used to find gpx files in path of project options.project_filename = None class ProjectParser(argparse.ArgumentParser): def __init__(self): argparse.ArgumentParser.__init__(self) group = self.add_mutually_exclusive_group() group.add_argument('-track' , action='store', dest='track') group.add_argument('-tracks' , action='store', dest='tracks') group.add_argument('-contour' , action='store', dest='contour') group.add_argument('-contours', action='store', dest='contours') group.add_argument('-disk' , action='store', dest='disk') group.add_argument('-project' , action='store', dest='project') group.add_argument('-records' , action='store_true', dest='db_tiles') group.add_argument('-tiles' , action='store', dest='coord_tiles') self.add_argument('-zoom' , action='store', dest='zoom') self.add_argument('-radius' , action='store', dest='radius') self.add_argument('-inside' , action='store_true', dest='inside') group.add_argument('-verbose', action='store', dest='verbose', nargs='?', const=3, 
default=None) self.add_argument('-quiet', action='store_true', dest='quiet') def error(self, msg): error('incorrect project syntax: ' + msg) def parse_args(self, arglist): options = argparse.ArgumentParser.parse_args(self, arglist) # verbosity if options.quiet: options.verbosity = 0 elif options.verbose is None: options.verbosity = 1 else: options.verbosity = int(options.verbose) complete_source(options) return options def decode_range(s): """Decode a range string into a list of integers: 8-10,12,14 --> [8, 9, 10, 12, 14] """ R = [] for x in s.split(','): m = re.search(r'(\d+)-(\d+)', x) if m: i1 = int(m.group(1)) i2 = int(m.group(2)) R.extend(list(range(i1, i2 + 1))) elif x.isdigit(): R.append(int(x)) else: return None return R def decode_range_ex(s): """Decode a zoom argument: 8-10,12,14/12 --> [8, 9, 10, 12, 14], 12 """ if ('/') not in s: return decode_range(s), 1000 else: zoom_range, zoom_limit = s.split('/') dec_range = decode_range(zoom_range) dec_limit = int(zoom_limit) if zoom_limit.isdigit() else None return dec_range, dec_limit def decode_radius(sr, sz): """Decode a radius argument: sz=8-10,12,14/12 and sr=10,5,2 --> [10, 10, 10, 5, 2] """ # no multiple radius if ',' not in sr: try: x
math.cos(0.47240363594 + 24499.0740637739 * self.t) X3 += 0.00000000000 * math.cos(4.58808593083 + 2119.00767786191 * self.t) X3 += 0.00000000000 * math.cos(1.93271006548 + 52179.9314359899 * self.t) X3 += 0.00000000000 * math.cos(2.71699794579 + 27043.2590656993 * self.t) X3 += 0.00000000000 * math.cos(0.78321130217 + 29530.7219040231 * self.t) X3 += 0.00000000000 * math.cos(0.87395244120 + 20426.32727493849 * self.t) X3 += 0.00000000000 * math.cos(0.22412464998 + 26514.7451499337 * self.t) X3 += 0.00000000000 * math.cos(2.09910746178 + 38654.2986590405 * self.t) X3 += 0.00000000000 * math.cos(0.37782872130 + 51066.18391357149 * self.t) X3 += 0.00000000000 * math.cos(6.08588015961 + 63498.71419893629 * self.t) X3 += 0.00000000000 * math.cos(3.06180705468 + 234790.88445668427 * self.t) X3 += 0.00000000000 * math.cos(0.44816354598 + 110013.18843293248 * self.t) X3 += 0.00000000000 * math.cos(3.76757290021 + 52195.71986153169 * self.t) X3 += 0.00000000000 * math.cos(0.26388289924 + 76674.88034692229 * self.t) X3 += 0.00000000000 * math.cos(3.06411835882 + 4551.7096795753 * self.t) X3 += 0.00000000000 * math.cos(1.55289005756 + 7.3573644843 * self.t) X3 += 0.00000000000 * math.cos(5.69100090266 + 33326.8225506577 * self.t) X3 += 0.00000000000 * math.cos(4.84238249166 + 2218.51328670329 * self.t) X3 += 0.00000000000 * math.cos(5.21804461989 + 51749.45190975589 * self.t) X3 += 0.00000000000 * math.cos(0.07928724753 + 7238.4317741165 * self.t) X3 += 0.00000000000 * math.cos(0.41819802690 + 52172.1687652739 * self.t) X3 += 0.00000000000 * math.cos(1.04835331170 + 53284.94101775829 * self.t) X3 += 0.00000000000 * math.cos(0.31150101949 + 66941.2891439017 * self.t) X3 += 0.00000000000 * math.cos(1.30179196383 + 52168.93655363109 * self.t) X3 += 0.00000000000 * math.cos(3.59073128026 + 58946.76070187749 * self.t) X3 += 0.00000000000 * math.cos(5.69460606394 + 30639.61282114949 * self.t) X3 += 0.00000000000 * math.cos(3.09211354360 + 62389.33564684289 * self.t) X3 += 
0.00000000000 * math.cos(3.64742441695 + 105411.23831396949 * self.t) X3 += 0.00000000000 * math.cos(2.56174511433 + 103292.47445359109 * self.t) X3 += 0.00000000000 * math.cos(3.50117859238 + 23868.9022199039 * self.t) # Mercury_X4 (t) // 42 terms of order 4 X4 = 0 X4 += 0.00000043303 * math.cos(2.70854317703 + 26088.1469590577 * self.t) X4 += 0.00000016746 * math.cos(2.85109602051 + 0.2438174835 * self.t) X4 += 0.00000005097 * math.cos(5.82035608585 + 52176.0501006319 * self.t) X4 += 0.00000001110 * math.cos(2.85416500039 + 78263.95324220609 * self.t) X4 += 0.00000000968 * math.cos(3.18837996543 + 26087.65932409069 * self.t) X4 += 0.00000000362 * math.cos(0.10660634045 + 104351.85638378029 * self.t) X4 += 0.00000000161 * math.cos(3.56341896847 + 130439.75952535449 * self.t) X4 += 0.00000000075 * math.cos(0.55603774289 + 156527.66266692868 * self.t) X4 += 0.00000000076 * math.cos(6.07252064875 + 52175.56246566489 * self.t) X4 += 0.00000000033 * math.cos(3.72826875982 + 182615.56580850288 * self.t) X4 += 0.00000000014 * math.cos(2.40716156772 + 78263.46560723908 * self.t) X4 += 0.00000000014 * math.cos(0.56399976721 + 208703.46895007708 * self.t) X4 += 0.00000000005 * math.cos(3.66056233199 + 234791.37209165128 * self.t) X4 += 0.00000000004 * math.cos(5.13174489459 + 104351.36874881329 * self.t) X4 += 0.00000000002 * math.cos(2.87490875788 + 24978.7684069643 * self.t) X4 += 0.00000000002 * math.cos(5.84989349950 + 27197.5255111511 * self.t) X4 += 0.00000000002 * math.cos(0.45146054812 + 260879.27523322542 * self.t) X4 += 0.00000000001 * math.cos(1.74376603369 + 130439.27189038748 * self.t) X4 += 0.00000000001 * math.cos(3.51635178690 + 286967.17837479962 * self.t) X4 += 0.00000000001 * math.cos(0.06797049294 + 51066.6715485385 * self.t) X4 += 0.00000000001 * math.cos(1.33488273309 + 1059.6257476727 * self.t) X4 += 0.00000000001 * math.cos(0.19743580633 + 20426.8149099055 * self.t) X4 += 0.00000000000 * math.cos(4.73344633686 + 156527.17503196168 * self.t) X4 += 
0.00000000000 * math.cos(3.03361722015 + 53285.4286527253 * self.t) X4 += 0.00000000000 * math.cos(0.28697199826 + 1109.1347346099 * self.t) X4 += 0.00000000000 * math.cos(2.12246327493 + 31749.4790082099 * self.t) X4 += 0.00000000000 * math.cos(3.62335756262 + 77154.57469011268 * self.t) X4 += 0.00000000000 * math.cos(5.27675905248 + 4552.1973145423 * self.t) X4 += 0.00000000000 * math.cos(0.97938730115 + 51116.6681704427 * self.t) X4 += 0.00000000000 * math.cos(2.90146638781 + 5661.0882316687 * self.t) X4 += 0.00000000000 * math.cos(3.37193339699 + 21536.1934619989 * self.t) X4 += 0.00000000000 * math.cos(1.47858093383 + 182615.07817353586 * self.t) X4 += 0.00000000000 * math.cos(3.30664239042 + 46514.7180514797 * self.t) X4 += 0.00000000000 * math.cos(2.38442876069 + 1589.3167127673 * self.t) X4 += 0.00000000000 * math.cos(4.43080795748 + 27147.5288892469 * self.t) X4 += 0.00000000000 * math.cos(5.33267823849 + 25132.5472174491 * self.t) X4 += 0.00000000000 * math.cos(0.89978296758 + 26617.8379241523 * self.t) X4 += 0.00000000000 * math.cos(4.07365069575 + 14765.48286075331 * self.t) X4 += 0.00000000000 * math.cos(1.63297628773 + 25558.4559939631 * self.t) X4 += 0.00000000000 * math.cos(3.35368510638 + 27043.7467006663 * self.t) X4 += 0.00000000000 * math.cos(4.71304947972 + 529.9347825781 * self.t) X4 += 0.00000000000 * math.cos(5.15326778299 + 57837.3821497841 * self.t) # Mercury_X5 (t) // 16 terms of order 5 X5 = 0 X5 += 0.00000000414 * math.cos(4.09017660105 + 0.2438174835 * self.t) X5 += 0.00000000327 * math.cos(2.83894329980 + 26088.1469590577 * self.t) X5 += 0.00000000134 * math.cos(4.51536199764 + 52176.0501006319 * self.t) X5 += 0.00000000046 * math.cos(1.23267980717 + 78263.95324220609 * self.t) X5 += 0.00000000016 * math.cos(4.45794773259 + 104351.85638378029 * self.t) X5 += 0.00000000007 * math.cos(5.57199695746 + 26087.65932409069 * self.t) X5 += 0.00000000006 * math.cos(1.55726840151 + 130439.75952535449 * self.t) X5 += 0.00000000003 * 
math.cos(4.94192470876 + 156527.66266692868 * self.t) X5 += 0.00000000001 * math.cos(1.93929522999 + 182615.56580850288 * self.t) X5 += 0.00000000001 * math.cos(4.16271885947 + 52175.56246566489 * self.t) X5 += 0.00000000001 * math.cos(5.10948519211 + 208703.46895007708 * self.t) X5 += 0.00000000000 * math.cos(0.99915185773 + 78263.46560723908 * self.t) X5 += 0.00000000000 * math.cos(1.90802069686 + 234791.37209165128 * self.t) X5 += 0.00000000000 * math.cos(3.92135630671 + 104351.36874881329 * self.t) X5 += 0.00000000000 * math.cos(1.30030594321 + 24978.7684069643 * self.t) X5 += 0.00000000000 * math.cos(0.96944549328 + 27197.5255111511 * self.t) X = (X0+ X1*self.t+ X2*self.t*self.t+ X3*self.t*self.t*self.t+ X4*self.t*self.t*self.t*self.t+ X5*self.t*self.t*self.t*self.t*self.t) # Mercury_Y0 (t) // 1853 terms of order 0 Y0 = 0 Y0 += 0.37749277893 * math.cos(2.83179506899 + 26088.1469590577 * self.t) Y0 += 0.11918926148 * math.cos(2.91948125760 + 0.2438174835 * self.t) Y0 += 0.03840153904 * math.cos(5.88254544140 + 52176.0501006319 * self.t) Y0 += 0.00585979278 * math.cos(2.65010770289 + 78263.95324220609 * self.t) Y0 += 0.00305833424 * math.cos(3.67378306016 + 26087.65932409069 * self.t) Y0 += 0.00105974941 * math.cos(5.70085415459 + 104351.85638378029 * self.t) Y0 += 0.00024906132 * math.cos(0.55066770933 + 52175.56246566489 * self.t) Y0 += 0.00021056065 * math.cos(2.46841472315 + 130439.75952535449 * self.t) Y0 += 0.00004441671 * math.cos(5.51916065495 + 156527.66266692868 * self.t) Y0 += 0.00003347442 * math.cos(3.67952842081 + 78263.46560723908 * self.t) Y0 += 0.00000976617 * math.cos(2.28672075339 + 182615.56580850288 * self.t) Y0 += 0.00000703903 * math.cos(2.88960689088 + 24978.7684069643 * self.t) Y0 += 0.00000640528 * math.cos(5.91381352466 + 27197.5255111511 * self.t) Y0 += 0.00000558149 * math.cos(0.50510861725 + 104351.36874881329 * self.t) Y0 += 0.00000447775 * math.cos(1.40511442556 + 1059.6257476727 * self.t) Y0 += 0.00000400281 * 
math.cos(0.29865270517 + 20426.8149099055 * self.t) Y0 += 0.00000283882 * math.cos(2.22271091529 + 31749.4790082099 * self.t) Y0 += 0.00000191007 * math.cos(2.70458121832 + 53285.4286527253 * self.t) Y0 += 0.00000181454 * math.cos(0.23779467119 + 1109.1347346099 * self.t) Y0 += 0.00000193895 * math.cos(5.38872313263 + 4552.1973145423 * self.t) Y0 += 0.00000221371 * math.cos(5.33746575111 + 208703.46895007708 * self.t) Y0 += 0.00000139001 * math.cos(5.94193853313 + 51066.6715485385 * self.t) Y0 += 0.00000183933 * math.cos(2.76579938720 + 5661.0882316687 * self.t) Y0 += 0.00000153303 * math.cos(1.11372235868 + 51116.6681704427 * self.t) Y0 += 0.00000104855 * math.cos(3.60039284031 + 130439.27189038748 * self.t) Y0 += 0.00000115202 * math.cos(5.27930045344 + 57837.3821497841 * self.t) Y0 += 0.00000090628 * math.cos(4.97031050861 + 529.9347825781 * self.t) Y0 += 0.00000075318 * math.cos(4.50136079719 + 27147.5288892469 * self.t) Y0 += 0.00000072957 * math.cos(3.43332933363 + 21536.1934619989 * self.t) Y0 += 0.00000074803 * math.cos(3.35834807468 + 46514.7180514797 * self.t) Y0 += 0.00000068710 * math.cos(4.91618713593 + 25132.5472174491 * self.t) Y0 += 0.00000083504 * math.cos(1.60859812856 + 10213.5293636945 * self.t) Y0 += 0.00000060985 * math.cos(3.88048699951 + 27043.7467006663 * self.t) Y0 += 0.00000057987 * math.cos(4.13400021557 + 14765.48286075331 * self.t) Y0 += 0.00000060953 * math.cos(3.43164694662 + 47624.0966035731 * self.t) Y0 += 0.00000053356 * math.cos(0.72676419246 + 26617.8379241523 * self.t) Y0 += 0.00000054964 * math.cos(3.74064645856 + 12566.3955174663 * self.t) Y0 += 0.00000047881 * math.cos(5.75865881516 + 79373.33179429949 * self.t) Y0 += 0.00000051263 * math.cos(1.96665333979 + 426.8420083595 * self.t) Y0 += 0.00000051361 * math.cos(2.10502555376 + 234791.37209165128 * self.t) Y0 += 0.00000050374 * math.cos(2.03207321104 + 25558.4559939631 * self.t) Y0 += 0.00000048449 * math.cos(1.94914263411 + 1589.3167127673 * self.t) Y0 += 0.00000048867 * 
math.cos(4.16469048525 + 77204.57131201689 * self.t) Y0 += 0.00000034739 * math.cos(4.11430856351 + 51646.3591355373 * self.t) Y0 += 0.00000032851 * math.cos(4.56963272977 + 955.3559241251 * self.t) Y0 += 0.00000044998 * math.cos(0.91167682267 + 41962.7645544209 * self.t) Y0 += 0.00000031618 * math.cos(4.66914248946 + 37410.8110573621 * self.t) Y0 += 0.00000028634 * math.cos(5.42748718604 + 30640.1004561165 * self.t) Y0 += 0.00000031480 * math.cos(2.70995810641 + 77154.57469011268 * self.t) Y0 += 0.00000032102 * math.cos(2.04798101755 + 83925.28529135829
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 15:52:02 2018

@author: branko
"""
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from utils import tile_raster_images
import math
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'jet'

# NOTE(review): TensorFlow 1.x graph-mode API throughout (placeholders,
# Sessions, tf.to_float); this script will not run under TF 2.x as-is.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images,\
    mnist.test.labels

# visualization of one reconstruction with gradual addition of the
# contributions of the active hidden units

def sigmoid(x):
    # numpy logistic function (used outside the TF graph)
    return 1 / (1 + np.exp(-x))

def weights(shape):
    # weight Variable initialized from a truncated normal (stddev 0.1)
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias(shape):
    # bias Variable initialized to zeros
    initial = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(initial)

def sample_prob(probs):
    """Sample a binary vector x according to the probability vector p(x=1) = probs"""
    return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)

def draw_weights(W, shape, N, stat_shape, interpolation="bilinear"):
    """Visualize the weights

    W -- weight vectors
    shape -- tuple of 2D dimensions for displaying one weight vector,
             usually the input image dimensions, e.g. (28,28)
    N -- number of weight vectors
    stat_shape -- 2D dimensions for displaying the state (e.g. (10,10) for 100 states)
    """
    image = (tile_raster_images(
        X=W.T,
        img_shape=shape,
        tile_shape=(int(math.ceil(N/stat_shape[0])), stat_shape[0]),
        tile_spacing=(1, 1)))
    plt.figure(figsize=(10, 14))
    plt.imshow(image, interpolation=interpolation)
    plt.axis('off')

def draw_reconstructions(ins, outs, states, shape_in, shape_state, N):
    """Visualize the inputs and the corresponding reconstructions and
    hidden-layer states

    ins -- input vectors
    outs -- reconstructed vectors
    states -- hidden-layer state vectors
    shape_in -- input image dimensions, e.g. (28,28)
    shape_state -- 2D dimensions for displaying the state (e.g. (10,10) for 100 states)
    N -- number of samples
    """
    plt.figure(figsize=(8, int(2 * N)))
    for i in range(N):
        plt.subplot(N, 4, 4*i + 1)
        plt.imshow(ins[i].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
        plt.title("Test input")
        plt.axis('off')
        plt.subplot(N, 4, 4*i + 2)
        plt.imshow(outs[i][0:784].reshape(shape_in), vmin=0, vmax=1, interpolation="nearest")
        plt.title("Reconstruction")
        plt.axis('off')
        plt.subplot(N, 4, 4*i + 3)
        plt.imshow(states[i].reshape(shape_state), vmin=0, vmax=1, interpolation="nearest")
        plt.title("States")
        plt.axis('off')
    plt.tight_layout()

def draw_generated(stin, stout, gen, shape_gen, shape_state, N):
    """Visualize the given hidden states, the final hidden states and
    the corresponding reconstructions

    stin -- initial hidden layer
    stout -- reconstructed vectors
    gen -- hidden-layer state vectors
    shape_gen -- input image dimensions, e.g. (28,28)
    shape_state -- 2D dimensions for displaying the state (e.g. (10,10) for 100 states)
    N -- number of samples
    """
    plt.figure(figsize=(8, int(2 * N)))
    for i in range(N):
        plt.subplot(N, 4, 4*i + 1)
        plt.imshow(stin[i].reshape(shape_state), vmin=0, vmax=1, interpolation="nearest")
        plt.title("set state")
        plt.axis('off')
        plt.subplot(N, 4, 4*i + 2)
        plt.imshow(stout[i][0:784].reshape(shape_state), vmin=0, vmax=1, interpolation="nearest")
        plt.title("final state")
        plt.axis('off')
        plt.subplot(N, 4, 4*i + 3)
        plt.imshow(gen[i].reshape(shape_gen), vmin=0, vmax=1, interpolation="nearest")
        plt.title("generated visible")
        plt.axis('off')
    plt.tight_layout()

Nh = 100  # number of units in the first hidden layer
h1_shape = (10,10)
Nv = 784  # number of units in the visible layer
v_shape = (28,28)
Nu = 5000  # number of samples used for reconstruction visualization

gibbs_sampling_steps = 1
alpha = 0.1  # learning rate

# --- first RBM: CD-1 training graph ---
g1 = tf.Graph()
with g1.as_default():
    X1 = tf.placeholder("float", [None, 784])
    w1 = weights([Nv, Nh])
    vb1 = bias([Nv])
    hb1 = bias([Nh])

    # positive phase: hidden activations driven by the data
    a1 = tf.matmul(X1,w1)
    h0_prob = tf.sigmoid(hb1 + a1)
#    h0_prob = tf.reduce_prod(h0_prob,axis=1)
#    h0_prob =tf.zeros(shape=[tf.shape(h0_prob)[0], 100],dtype=tf.float32) + tf.reshape(h0_prob, [-1,1])
    h0 = sample_prob(h0_prob)
    h1 = h0

    # negative phase: k steps of Gibbs sampling (v <-> h)
    for step in range(gibbs_sampling_steps):
        v1_prob = tf.nn.sigmoid(tf.matmul( h1, tf.transpose(w1)) +vb1)
#        v1_prob = tf.reduce_prod(v1_prob, axis=1)
#        v1_prob = tf.zeros(shape=[tf.shape(v1_prob)[0], 784],dtype=tf.float32) + tf.reshape(v1_prob, [-1,1])
        v1 = sample_prob(v1_prob)
        a1 = tf.matmul(v1,w1)
        h1_prob = tf.sigmoid(hb1 + a1)
#        h1_prob = tf.reduce_prod(h1_prob, axis=1)
#        h1_prob = tf.zeros(shape=[tf.shape(h1_prob)[0], 100],dtype=tf.float32) + tf.reshape(h1_prob, [-1,1])
        h1 = sample_prob(h1_prob)

    # contrastive-divergence gradient estimate and parameter updates
    w1_positive_grad = tf.matmul(tf.transpose(X1),h0)
    w1_negative_grad = tf.matmul(tf.transpose(v1),h1)
    dw1 = (w1_positive_grad - w1_negative_grad) / tf.to_float(tf.shape(X1)[0])
    update_w1 = tf.assign_add(w1, alpha * dw1)
    update_vb1 = tf.assign_add(vb1, alpha * tf.reduce_mean(X1 - v1, 0))
    update_hb1 = tf.assign_add(hb1, alpha * tf.reduce_mean(h0 - h1, 0))
    out1 = (update_w1, update_vb1, update_hb1)

    # one extra down pass for reconstruction/error reporting
    v1_prob = tf.nn.sigmoid(tf.matmul( h1, tf.transpose(w1)) +vb1)
#    v1_prob = tf.reduce_prod(v1_prob, axis=1)
#    v1_prob = tf.zeros(shape=[tf.shape(v1_prob)[0], 784],dtype=tf.float32) + tf.reshape(v1_prob, [-1,1])
    v1 = sample_prob(v1_prob)

    err1 = X1 - v1_prob
    err_sum1 = tf.reduce_mean(err1 * err1)

    initialize1 = tf.global_variables_initializer()

batch_size = 100
epochs = 100
n_samples = mnist.train.num_examples

total_batch = int(n_samples / batch_size) * epochs

# train the first RBM
sess1 = tf.Session(graph=g1)
sess1.run(initialize1)
for i in range(total_batch):
    batch, label = mnist.train.next_batch(batch_size)
    err, _ = sess1.run([err_sum1, out1], feed_dict={X1: batch})
    if i%(int(total_batch/10)) == 0:
        print(i, err)

# pull trained parameters out of the graph as numpy arrays
w1s = w1.eval(session=sess1)
vb1s = vb1.eval(session=sess1)
hb1s = hb1.eval(session=sess1)
vr, h1s = sess1.run([v1_prob, h1], feed_dict={X1: teX[0:Nu,:]})

# weight visualization
draw_weights(w1s, v_shape, Nh, h1_shape)

# visualization of reconstructions and states
draw_reconstructions(teX, vr, h1s, v_shape, h1_shape, 200)

# ---------------------------------------------------------------------------
# Second RBM stacked on top of the first (greedy layer-wise DBN training)
# ---------------------------------------------------------------------------
Nh2 = Nh  # number of units in the second hidden layer
h2_shape = h1_shape
gibbs_sampling_steps = 2
alpha = 0.1

g2 = tf.Graph()
with g2.as_default():
    X2 = tf.placeholder("float", [None, Nv])
    # first-layer parameters are (trainable) copies of the trained RBM 1
    w1a = tf.Variable(w1s)
    vb1a = tf.Variable(vb1s)
    hb1a = tf.Variable(hb1s)
    w2 = weights([Nh, Nh2])
    hb2 = bias([Nh2])

    # upward pass through the first layer, then into the top RBM
    h1up_prob = tf.nn.sigmoid( tf.matmul(X2,w1a) +hb1a)
    h1up = sample_prob(h1up_prob)
    h2up_prob = tf.nn.sigmoid( tf.matmul(h1up,w2) +hb2)
    h2up = sample_prob(h2up_prob)

    # CD-k Gibbs chain in the top RBM (h1 <-> h2)
    h2down = h2up
    for step in range(gibbs_sampling_steps):
        h1down_prob = tf.nn.sigmoid(tf.matmul( h2down, tf.transpose(w2)) +hb1a)
        h1down = sample_prob(h1down_prob)
        h2down_prob =tf.nn.sigmoid( tf.matmul(h1down,w2) +hb2)
        h2down = sample_prob(h2down_prob)

    # contrastive-divergence update for the top RBM
    w2_positive_grad = tf.matmul(tf.transpose(h1up),h2up)
    w2_negative_grad = tf.matmul(tf.transpose(h1down),h2down)
    dw2 = (w2_positive_grad - w2_negative_grad) / tf.to_float(tf.shape(h1up)[0])
    update_w2 = tf.assign_add(w2, alpha * dw2)
    update_hb1a = tf.assign_add(hb1a, alpha * tf.reduce_mean(h1up - h1down, 0))
    update_hb2 = tf.assign_add(hb2, alpha * tf.reduce_mean(h2up - h2down, 0))
    out2 = (update_w2, update_hb1a, update_hb2)

    # reconstruction of the input based on the top hidden state h3
    # ...
    # ...
    v_out_prob = tf.nn.sigmoid(tf.matmul( h1down, tf.transpose(w1a)) +vb1a)
    v_out = sample_prob(v_out_prob)

    err2 = X2 - v_out_prob
    err_sum2 = tf.reduce_mean(err2 * err2)

    initialize2 = tf.global_variables_initializer()

batch_size = 100
epochs = 100
n_samples = mnist.train.num_examples

total_batch = int(n_samples / batch_size) * epochs

sess2 = tf.Session(graph=g2)
sess2.run(initialize2)
for i in range(total_batch):
    # training iterations (skeleton — left as an exercise)
    #...
    #...
    # NOTE(review): `err` is not assigned in this skeleton loop; the value
    # printed below is stale (from the RBM-1 loop) until the training step
    # above is filled in.
    if i%(int(total_batch/10)) == 0:
        print(i, err)

w2s, hb1as, hb2s = sess2.run([w2, hb1a, hb2], feed_dict={X2: batch})
vr2, h2downs = sess2.run([v_out_prob, h2down], feed_dict={X2: teX[0:Nu,:]})

# weight visualization
draw_weights(w2s, h1_shape, Nh2, h2_shape, interpolation="nearest")
# visualization of reconstructions and states
draw_reconstructions(teX, vr2, h2downs, v_shape, h2_shape, 200)

# Generating samples from random top-layer hidden vectors
#...
#...
# Emulating additional Gibbs sampling steps via feed_dict
#...
#...
#

# ---------------------------------------------------------------------------
# DBN fine-tuning with the wake-sleep (up-down) algorithm
# ---------------------------------------------------------------------------
beta = 0.01  # fine-tuning learning rate
g3 = tf.Graph()
with g3.as_default():
    X3 = tf.placeholder("float", [None, Nv])
    # recognition (up) and generative (down) weights are untied copies
    r1_up = tf.Variable(w1s)
    w1_down = tf.Variable(tf.transpose(w1s))
    w2a = tf.Variable(w2s)
    hb1_up = tf.Variable(hb1s)
    hb1_down = tf.Variable(hb1as)
    vb1_down = tf.Variable(vb1s)
    hb2a = tf.Variable(hb2s)

    # wake pass
    h1_up_prob = tf.nn.sigmoid( tf.matmul(X3,r1_up) +hb1_up)
    h1_up = sample_prob(h1_up_prob)
    v1_up_down_prob = tf.nn.sigmoid( tf.matmul(h1_up,w1_down) +vb1_down)
    v1_up_down = sample_prob(v1_up_down_prob)

    # top RBM Gibbs passes
    h2_up_prob = tf.nn.sigmoid( tf.matmul(h1_up,w2a) +hb2a)
    h2_up = sample_prob(h2_up_prob)
    h2_down = h2_up
    for step in range(gibbs_sampling_steps):
        # BUG FIX: chain state is this graph's `h2_down`; the original used
        # `h2down`, a tensor that belongs to graph g2, which fails at run
        # time with "Tensor ... is not an element of this graph".
        h1_down_prob = tf.nn.sigmoid(tf.matmul( h2_down, tf.transpose(w2a)) +hb1_up)
        h1_down = sample_prob(h1_down_prob)
        h2_down_prob = tf.nn.sigmoid( tf.matmul(h1_down,w2a) +hb2a)
        h2_down = sample_prob(h2_down_prob)

    # sleep pass
    v1_down_prob = tf.nn.sigmoid( tf.matmul(h1_down,w1_down) +vb1_down)
    v1_down = sample_prob(v1_down_prob)
    h1_down_up_prob =tf.nn.sigmoid( tf.matmul(v1_down,r1_up) +hb1_up)
    h1_down_up = sample_prob(h1_down_up_prob)

    # generative weights update during wake pass
    update_w1_down = tf.assign_add(w1_down, beta * tf.matmul(tf.transpose(h1_up), X3 - v1_up_down_prob) / tf.to_float(tf.shape(X3)[0]))
    update_vb1_down = tf.assign_add(vb1_down, beta * tf.reduce_mean(X3 - v1_up_down_prob, 0))

    # top RBM update
    w2_positive_grad = tf.matmul(tf.transpose(h1_up),h2_up)
    # BUG FIX: `h2down` and `h1up` below belonged to graphs g2/g1; use this
    # graph's `h2_down` / `h1_up` instead (same cross-graph error as above).
    w2_negative_grad = tf.matmul(tf.transpose(h1_down),h2_down)
    dw3 = (w2_positive_grad - w2_negative_grad) / tf.to_float(tf.shape(h1_up)[0])
    update_w2 = tf.assign_add(w2a, beta * dw3)
    update_hb1_down = tf.assign_add(hb1_down, beta * tf.reduce_mean(h1_up - h1_down, 0))
    update_hb2 = tf.assign_add(hb2a, beta * tf.reduce_mean(h2_up - h2_down, 0))

    # recognition weights update during sleep pass
    update_r1_up = tf.assign_add(r1_up, beta * tf.matmul(tf.transpose(v1_down_prob), h1_down - h1_down_up) / tf.to_float(tf.shape(X3)[0]))
    update_hb1_up = tf.assign_add(hb1_up, beta * tf.reduce_mean(h1_down - h1_down_up, 0))

    out3 = (update_w1_down, update_vb1_down, update_w2, update_hb1_down,
            update_hb2, update_r1_up, update_hb1_up)

    err3 = X3 - v1_down_prob
    err_sum3 = tf.reduce_mean(err3 * err3)

    initialize3 = tf.global_variables_initializer()

batch_size = 100
epochs = 100
n_samples = mnist.train.num_examples

total_batch = int(n_samples / batch_size) * epochs

sess3 = tf.Session(graph=g3)
sess3.run(initialize3)
for i in range(total_batch):
    #...
    err, _ = sess3.run([err_sum3, out3], feed_dict={X3: batch})
    if i%(int(total_batch/10)) == 0:
        print(i, err)

w2ss, r1_ups, w1_downs, hb2ss, hb1_ups, hb1_downs, vb1_downs = sess3.run(
    [w2a, r1_up, w1_down, hb2a, hb1_up, hb1_down, vb1_down], feed_dict={X3: batch})
vr3, h2_downs, h2_down_probs = sess3.run([v1_down_prob, h2_down, h2_down_prob],
                                         feed_dict={X3: teX[0:Nu,:]})

# weight visualization
draw_weights(r1_ups, v_shape, Nh, h1_shape)
draw_weights(w1_downs.T, v_shape, Nh, h1_shape)
draw_weights(w2ss, h1_shape, Nh2, h2_shape, interpolation="nearest")

# visualization of reconstructions and states of all three stages
Npics = 5
plt.figure(figsize=(8, 12*4))
for i in range(20):
    plt.subplot(20, Npics, Npics*i + 1)
    plt.imshow(teX[i].reshape(v_shape), vmin=0, vmax=1)
    plt.title("Test input")
    plt.subplot(20, Npics, Npics*i + 2)
    plt.imshow(vr[i][0:784].reshape(v_shape), vmin=0, vmax=1)
    plt.title("Reconstruction 1")
    plt.subplot(20, Npics, Npics*i + 3)
    plt.imshow(vr2[i][0:784].reshape(v_shape), vmin=0, vmax=1)
    plt.title("Reconstruction 2")
    plt.subplot(20, Npics, Npics*i + 4)
    plt.imshow(vr3[i][0:784].reshape(v_shape), vmin=0, vmax=1)
    plt.title("Reconstruction 3")
    plt.subplot(20, Npics, Npics*i + 5)
    plt.imshow(h2_downs[i][0:Nh2].reshape(h2_shape), vmin=0, vmax=1, interpolation="nearest")
    plt.title("Top states 3")
plt.tight_layout()

# Generating samples from random top-layer hidden vectors
#...
#...
# Emulating additional Gibbs sampling steps via feed_dict
#...
#...

def draw_rec(inp, title, size, Nrows, in_a_row, j):
    """Draw one iteration in the creation of the visible layer

    inp -- visible layer
    title -- image title
    size -- 2D dimensions of the visible layer
    Nrows -- max. number of image rows
    in_a_row -- number of images in one row
    j -- position of the image in the grid
    """
    plt.subplot(Nrows, in_a_row, j)
    plt.imshow(inp.reshape(size), vmin=0, vmax=1, interpolation="nearest")
    plt.title(title)
    plt.axis('off')
#! /usr/bin/env python3 """Tests for templite.""" from re import escape from templite import Templite, TempliteSyntaxError from unittest import TestCase, main # pylint: disable=W0612,E1101 # Disable W0612 (Unused variable) and # E1101 (Instance of 'foo' has no 'bar' member) class AnyOldObject(object): """Simple testing object. Use keyword arguments in the constructor to set attributes on the object. """ def __init__(self, **attrs): for n, v in attrs.items(): setattr(self, n, v) class TempliteTest(TestCase): """Tests for Templite.""" def try_render(self, text, ctx=None, result=None): """Render `text` through `ctx`, and it had better be `result`. Result defaults to None so we can shorten the calls where we expect an exception and never get to the result comparison. """ actual = Templite(text).render(ctx or {}) if result: self.assertEqual(actual, result) def assertSynErr(self, msg): pat = "^" + escape(msg) + "$" return self.assertRaisesRegexp(TempliteSyntaxError, pat) ''' def test_passthrough(self): # Strings without variables are passed through unchanged. self.assertEqual(Templite("Hello").render(), "Hello") self.assertEqual( Templite("Hello, 20% fun time!").render(), "Hello, 20% fun time!" ) def test_variables(self): # Variables use {{var}} syntax. self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!") def test_undefined_variables(self): # Using undefined names is an error. with self.assertRaises(Exception): self.try_render("Hi, {{name}}!") def test_pipes(self): # Variables can be filtered with pipes. data = { 'name': 'Ned', 'upper': lambda x: x.upper(), 'second': lambda x: x[1], } self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!") # Pipes can be concatenated. self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!") def test_reusability(self): # A single Templite can be used more than once with different data. 
globs = { 'upper': lambda x: x.upper(), 'punct': '!', } template = Templite("This is {{name|upper}}{{punct}}", globs) self.assertEqual(template.render({'name':'Ned'}), "This is NED!") self.assertEqual(template.render({'name':'Ben'}), "This is BEN!") def test_attribute(self): # Variables' attributes can be accessed with dots. obj = AnyOldObject(a="Ay") self.try_render("{{obj.a}}", locals(), "Ay") obj2 = AnyOldObject(obj=obj, b="Bee") self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee") def test_member_function(self): # Variables' member functions can be used, as long as they are nullary. class WithMemberFns(AnyOldObject): """A class to try out member function access.""" def ditto(self): """Return twice the .txt attribute.""" return self.txt + self.txt obj = WithMemberFns(txt="Once") self.try_render("{{obj.ditto}}", locals(), "OnceOnce") def test_item_access(self): # Variables' items can be used. d = {'a':17, 'b':23} self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23") def test_loops(self): # Loops work like in Django. nums = [1,2,3,4] self.try_render( "Look: {% for n in nums %}{{n}}, {% endfor %}done.", locals(), "Look: 1, 2, 3, 4, done." ) # Loop iterables can be filtered. def rev(l): """Return the reverse of `l`.""" l = l[:] l.reverse() return l self.try_render( "Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.", locals(), "Look: 4, 3, 2, 1, done." ) def test_empty_loops(self): self.try_render( "Empty: {% for n in nums %}{{n}}, {% endfor %}done.", {'nums':[]}, "Empty: done." ) def test_multiline_loops(self): self.try_render( "Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.", {'nums':[1,2,3]}, "Look: \n\n1, \n\n2, \n\n3, \ndone." ) def test_multiple_loops(self): self.try_render( "{% for n in nums %}{{n}}{% endfor %} and " "{% for n in nums %}{{n}}{% endfor %}", {'nums': [1,2,3]}, "123 and 123" ) def test_comments(self): # Single-line comments work: self.try_render( "Hello, {# Name goes here: #}{{name}}!", {'name':'Ned'}, "Hello, Ned!" 
) # and so do multi-line comments: self.try_render( "Hello, {# Name\ngoes\nhere: #}{{name}}!", {'name':'Ned'}, "Hello, Ned!" ) def test_if(self): self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", {'ned': 1, 'ben': 0}, "Hi, NED!" ) self.try_render( "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", {'ned': 0, 'ben': 1}, "Hi, BEN!" ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 0, 'ben': 0}, "Hi, !" ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 1, 'ben': 0}, "Hi, NED!" ) self.try_render( "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", {'ned': 1, 'ben': 1}, "Hi, NEDBEN!" ) def test_complex_if(self): class Complex(AnyOldObject): """A class to try out complex data access.""" def getit(self): """Return it.""" return self.it obj = Complex(it={'x':"Hello", 'y': 0}) self.try_render( "@" "{% if obj.getit.x %}X{% endif %}" "{% if obj.getit.y %}Y{% endif %}" "{% if obj.getit.y|str %}S{% endif %}" "!", { 'obj': obj, 'str': str }, "@XS!" ) def test_loop_if(self): self.try_render( "@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!", {'nums': [0,1,2]}, "@0Z1Z2!" ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", {'nums': [0,1,2]}, "X@012!" ) self.try_render( "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", {'nums': []}, "X!" ) def test_nested_loops(self): self.try_render( "@" "{% for n in nums %}" "{% for a in abc %}{{a}}{{n}}{% endfor %}" "{% endfor %}" "!", {'nums': [0,1,2], 'abc': ['a', 'b', 'c']}, "@a0b0c0a1b1c1a2b2c2!" ) def test_exception_during_evaluation(self): # TypeError: Couldn't evaluate {{ foo.bar.baz }}: # 'NoneType' object is unsubscriptable with self.assertRaises(TypeError): self.try_render( "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? 
there" ) def test_bad_names(self): with self.assertSynErr("Not a valid name: 'var%&!@'"): self.try_render("Wat: {{ var%&!@ }}") with self.assertSynErr("Not a valid name: 'filter%&!@'"): self.try_render("Wat: {{ foo|filter%&!@ }}") with self.assertSynErr("Not a valid name: '@'"): self.try_render("Wat: {% for @ in x %}{% endfor %}") def test_bogus_tag_syntax(self): with self.assertSynErr("Don't understand tag: 'bogus'"): self.try_render("Huh: {% bogus %}!!{% endbogus %}??") def test_malformed_if(self): with self.assertSynErr("Don't understand if: '{% if %}'"): self.try_render("Buh? {% if %}hi!{% endif %}") with self.assertSynErr("Don't understand if: '{% if this or that %}'"): self.try_render("Buh? {% if this or that %}hi!{% endif %}") def test_malformed_for(self): with self.assertSynErr("Don't understand for: '{% for %}'"): self.try_render("Weird: {% for %}loop{% endfor %}") with self.assertSynErr("Don't understand for: '{% for x from y %}'"): self.try_render("Weird: {% for x from y %}loop{% endfor %}") with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"): self.try_render("Weird: {% for x, y in z %}loop{% endfor %}") def test_bad_nesting(self): with self.assertSynErr("Unmatched action tag: 'if'"): self.try_render("{% if x %}X") with self.assertSynErr("Mismatched end tag: 'for'"): self.try_render("{% if x %}X{% endfor %}") with self.assertSynErr("Too many ends: '{% endif %}'"): self.try_render("{% if x %}{% endif %}{% endif %}") def test_malformed_end(self): with self.assertSynErr("Don't understand end: '{% end if %}'"): self.try_render("{% if x %}X{% end if %}") with self.assertSynErr("Don't understand end: '{% endif now %}'"): self.try_render("{% if x %}X{% endif now %}") ''' def test1_fibonacci(self): def fib(n): l = [[0,1]] for _ in range(n-2): l.append(l[-1]+[sum(l[-1][-2:])]) return l self.try_render("{% for l in n|fib%}{{l|to_str|join}}\n{% endfor %}", {'fib': fib, 'n': 4, 'to_str' : lambda x: list(map(str, x)), 'join': lambda x: " ".join(x)}, 
"0 1\n0 1 1\n0 1 1 2\n") def test2_fibonacci(self): def fib(n): l = [[0,1]] for _ in range(n-2): l.append(l[-1]+[sum(l[-1][-2:])]) return l self.try_render("{% for l in n|fib%}{{l|to_str|join}}\n{% endfor %}", {'fib': fib, 'n': 8, 'to_str' : lambda x: list(map(str, x)), 'join': lambda x: " ".join(x)}, "0 1\n0 1 1\n0 1 1 2\n0 1 1 2 3\n0 1 1 2 3 5\n0 1 1 2 3 5 8\n0 1 1 2 3 5 8 13\n") def test3_fibonacci(self): def fib(n): l = [[0,1]] for _ in range(n-2): l.append(l[-1]+[sum(l[-1][-2:])]) return l self.try_render("{% for l in n|fib%}{{l|to_str|join}}\n{% endfor %}", {'fib': fib, 'n': 16, 'to_str' : lambda x: list(map(str, x)), 'join': lambda x: " ".join(x)}, "0 1\n0 1 1\n0 1 1 2\n0 1 1 2 3\n0 1 1 2 3 5\n0 1 1 2 3 5 8\n0 1 1 2 3 5 8 13\n0 1 1 2 3 5 8 13 21\n0 1 1 2 3 5 8 13 21 34\n0 1 1 2 3 5 8 13 21 34 55\n0 1 1 2 3 5 8 13 21 34 55 89\n0 1 1 2 3 5 8 13 21 34 55 89 144\n0 1 1 2 3 5 8 13 21 34 55 89 144 233\n0 1 1 2 3 5 8 13 21 34 55 89 144 233 377\n0 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610\n") # This attempts def test1_product_listings(self): self.try_render("""{% for p in products|values %}{{p.name}}\n price: {{p.price}}\n rating: {{p.rating}}\n in-stock: {{p.in_stock}}\n{% endfor %}""", {'products':{"PID10367892567":{"name": "cell phone", "price":"$999", "rating":"4.5/5", "in_stock":"yes"}, "PID10788942312":{"name": "television", "price":"$499", "rating":"4.3/5", "in_stock":"no"}, "PID10485657534":{"name": "laptop", "price":"$399", "rating":"4.4/5", "in_stock":"yes"}}, 'format': lambda x: format(x, ">6s"), 'values': lambda x: x.values()}, "cell phone\n price:
#!/usr/bin/env python

import sys
import os
import random
import copy
import time
import traceback
import inspect
import imp

#http://stackoverflow.com/questions/606561/how-to-get-filename-of-the-main-module-in-python
def main_is_frozen():
    """Return True when running from a frozen executable (py2exe/freeze)."""
    return (hasattr(sys, "frozen") or # new py2exe
            hasattr(sys, "importers") # old py2exe
            or imp.is_frozen("__main__")) # tools/freeze

def get_main_dir():
    """Return the directory of the main script, or of the frozen executable."""
    if main_is_frozen():
        # print 'Running from path', os.path.dirname(sys.executable)
        return os.path.dirname(sys.executable)
    return os.path.dirname(os.path.realpath(__file__))

script_dir = get_main_dir()

#if sys.platform == 'win32' and hasattr(sys, "frozen"):
#    script_dir = os.path.dirname(sys.executable)
#else:
#    script_dir = os.path.dirname(os.path.realpath(__file__))

# ----- Handling localization
import locale
import gettext

APP_NAME = "monorail"
LOCALE_DIR = os.path.join(script_dir, "data/locale")

# Candidate UI languages in decreasing priority, collected from the usual
# POSIX environment variables, with English as the final fallback.
DEFAULT_LANGUAGES = os.environ.get('LANGUAGE', '').split(':')
DEFAULT_LANGUAGES += os.environ.get('LC_ALL', '').split(':')
DEFAULT_LANGUAGES += os.environ.get('LC_MESSAGES', '').split(':')
DEFAULT_LANGUAGES += os.environ.get('LANG', '').split(':')
DEFAULT_LANGUAGES += ['en_US']

# BUG FIX: `languages` was previously only bound inside `if lc:`, so when
# the default locale could not be determined (lc is None) the following
# `languages += DEFAULT_LANGUAGES` raised a NameError at import time.
languages = []
lc, encoding = locale.getdefaultlocale()
if lc:
    languages = [lc]
languages += DEFAULT_LANGUAGES

mo_location = LOCALE_DIR

gettext.install (True,localedir=None, unicode=1)
gettext.find(APP_NAME, mo_location)
gettext.textdomain (APP_NAME)
gettext.bind_textdomain_codeset(APP_NAME, "UTF-8")
lang = gettext.translation (APP_NAME, mo_location, languages = languages, fallback = True)
lang.install()
# stash the translation object on the module so other modules can reach it
gettext.lang = lang

# ----- End handle localisation

import pygame
from pygame.locals import *

import koon.app
from koon.app import Game
from koon.input import UserInput, Mouse
from koon.geo import Vec3D, Vec2D, Rectangle
from koon.res import resman
from koon.gui import ImageButton, GuiState
import koon.snd as snd

from menu import MonorailMenu, SingleSwitch
from tiles import *
from world import Level, Playfield
from player import *
from hud import Hud, IngameMenu
from settings import *
from frame import Frame
from sndman import MusicManager, SoundManager
import control as ctrl
import event
import scenarios
from worldview import PlayfieldView

class Monorail (Game):
    """The Monorail main application

    public members:
    - game_is_done: True when app should exit
    """

    def __init__( self, configuration ):
        Game.__init__( self, _("Mystic Mine"), configuration )

    def before_gameloop( self ):
        # One-time setup before the main loop: resources, audio volumes,
        # and the initial state (menu).
        resman.read("data/resources.cfg")

        self.game_data = GameData( self.userinput )

        # apply configuration settings
        SoundManager.set_sound_volume( self.config.sound_volume )
        SoundManager.set_music_volume( self.config.music_volume )

        # set state
        self.menu = MonorailMenu( self.game_data )
        self.game = MonorailGame( self.game_data )
        self.editor = None
        self.state = self.game
        self.state = self.menu

        # set window buttons
        self.max_button = ImageButton( copy.copy(resman.get("game.max_button")), Vec2D(800-16-4, 4) )

    def do_tick( self, indev ):
        # One logic tick: handle debug hotkeys (F5+F8 chords), drive the
        # current state object, and process state transitions.
        if indev.key.is_down(K_F5) and indev.key.is_down(K_F8) and indev.key.went_down( K_ESCAPE ):
            self.game_is_done = True
        # F5+F8+E toggles between the game and the built-in level editor.
        if indev.key.is_down(K_F5) and indev.key.is_down(K_F8) and indev.key.went_down( K_e ):
            if self.state == self.game:
                level_nr = self.game_data.get_quest().get_current_level_nr()
                self.editor = MonorailEditor( level_nr )
                self.state = self.editor
            elif self.state == self.editor:
                self.editor.save_all()
                self.game = MonorailGame( self.game_data )
                self.state = self.game

        if self.state == self.game:
            if self.game.is_done(): # or indev.key.went_down( K_0 ):
                self.game.to_next_level = False
                if self.game.state == MonorailGame.STATE_DONE: # or indev.key.went_down( K_0 ):
                    if self.game_data.is_single_player():
                        # Advance quest progress; flag the quest finished on
                        # the last level.
                        if self.game_data.get_quest().progress == self.game_data.get_quest().get_level_count() - 1:
                            self.game_data.set_game_finished()
                        self.game_data.get_quest().to_next_level()
                        self.game_data.save_single_player_progress()
                        self.state = self.menu
                        self.menu.show_level_select()
                    else:
                        # Multiplayer: go straight to the next level.
                        self.game_data.get_quest().to_next_level()
                        self.game.restart( self.game_data )
                elif self.game.state == MonorailGame.STATE_MENU:
                    self.state = self.menu
                    self.menu.show_main_menu()
                elif self.game.state == MonorailGame.STATE_QUIT:
                    self.game_is_done = True
        elif self.state == self.menu:
            if self.menu.is_done():
                if self.menu.should_quit:
                    self.game_is_done = True
                else:
                    self.state = self.game
                    self.game.restart( self.game_data )

        self.state.do_tick( indev )

        # Handle maximize button
        self.max_button.tick( indev, None )
        if self.max_button.went_down():
            self.config.is_fullscreen = not self.config.is_fullscreen
            if not self.config.is_fullscreen:
                pygame.display.set_mode(self.config.resolution)
            else:
                pygame.display.set_mode(self.config.resolution, pygame.FULLSCREEN)

    def render( self, surface, interpol, time_sec ):
        # Delegate drawing to the active state; overlay window chrome and
        # the (state-owned) mouse cursor.
        self.state.draw( surface, interpol, time_sec )
        self.max_button.draw( surface, interpol, time_sec )
        self.state.draw_mouse( surface, interpol, time_sec )
        #self.draw_fps( surface )


class MonorailGame:
    """State machine for one play session (intro -> begin -> game ->
    stats -> total -> done, with menu/quit exits)."""

    STATE_INTRO, STATE_BEGIN, STATE_GAME, STATE_MENU, STATE_QUIT, STATE_STATS, STATE_TOTAL,\
    STATE_DONE = range( 8 )

    # Ticks the mouse cursor stays visible after its last movement
    # (presumably 25 ticks/sec * 3 s — TODO confirm tick rate).
    MOUSE_TIMEOUT = 25 * 3

    def __init__( self, game_data ):
        self.restart( game_data )
        self.music_man = MusicManager()

        # Preload assets that must not stutter when first used in-game.
        resman.get("game.clock_sound")
        resman.get("game.clockring_sound")
        resman.get("game.explosion_sprite")

    def restart( self, game_data ):
        """Start a new game with the current game_data"""
        self.game_data = game_data
        self.state = MonorailGame.STATE_INTRO

        self.scenario = self.game_data.get_quest().create_scenario(self.game_data.skill_level.value)
        self.playfield = self.scenario.playfield
        self.controller = ctrl.GroundControl( self.playfield )
        self.init_goldcars()

        self.hud = Hud( self.scenario, self.controller, self.game_data )
        self.hud.start_intro_screen()
        self.begin_timeout = 25 * 3

        self.ingame_menu = None
        self.gui_state = GuiState()

        self.mouse_timeout = MonorailGame.MOUSE_TIMEOUT
        self.is_paused = False

    def init_goldcars( self ):
        # Human players come from game_data.goldcars; AI opponents are
        # appended per quest-defined IQ, with empty display names.
        goldcar_names = []
        controllers = []
        for name, controller in self.game_data.goldcars:
            goldcar_names.append( name )
            controllers.append( controller )

        for iq in self.game_data.get_quest().get_opponent_iqs():
            goldcar_names.append( "" )
            controllers.append( ctrl.AiController( None, iq ) )

        self.playfield.add_goldcars( goldcar_names )
        self.controller.add_controllers( controllers )

    def do_tick( self, indev ):
        # One logic tick; runs either the live game states or, when the
        # ingame menu is open, only the menu.
        if self.ingame_menu is None and not self.is_paused:
            if self.game_data.is_single_player() or \
               self.game_data.is_single_random():
                SingleSwitch.feed_keys( indev )

                # in singleplayer, all joystick buttons are keypress
                for joy in indev.joys:
                    if joy.any_went_down():
                        indev.key.feed_down( K_SPACE )
                    if joy.any_went_up():
                        indev.key.feed_up( K_SPACE )

            if self.state == MonorailGame.STATE_INTRO:
                if self.hud.is_ready(): # or self.game_data.is_single_player():
                    self.hud.end_info()
                    self.state = MonorailGame.STATE_BEGIN
                    self.music_man.play()
            elif self.state == MonorailGame.STATE_BEGIN:
                # Spawn one goldcar every 50 ticks; extend the countdown
                # while cars remain to be spawned.
                if self.begin_timeout % 50 == 0:
                    random_spawn = not self.game_data.is_single_player();
                    spawns_left = self.playfield.spawn_next_goldcar( random_spawn )
                    if spawns_left:
                        self.begin_timeout += 50

                self.controller.game_tick( indev )
                self.playfield.game_tick()

                # Start right away in single player
                if self.game_data.is_single_player():
                    self.scenario.game_tick()

                self.begin_timeout -= 1
                if self.begin_timeout <= 0:
                    self.state = MonorailGame.STATE_GAME

                if indev.mouse.has_moved():
                    self.mouse_timeout = MonorailGame.MOUSE_TIMEOUT
                else:
                    self.mouse_timeout -= 1
            elif self.state == MonorailGame.STATE_GAME:
                self.controller.game_tick( indev )
                self.playfield.game_tick()
                self.scenario.game_tick()

                if self.scenario.is_finished():
                    if not self.game_data.is_single_player():
                        self.hud.start_end_screen()
                    else:
                        # Single player: persist score/skill, then show
                        # the win or lose screen.
                        self.game_data.get_quest().save_score( self.scenario )
                        skill = self.game_data.get_quest().get_skill( self.scenario )
                        self.game_data.skill_level.update( skill )
                        self.game_data.save_single_player_progress()
                        if self.scenario.has_won():
                            self.hud.start_win_screen()
                        else:
                            self.hud.start_lose_screen()
                    self.state = MonorailGame.STATE_STATS
                    self.mouse_timeout = MonorailGame.MOUSE_TIMEOUT
                    self.music_man.stop()

                if indev.mouse.has_moved():
                    self.mouse_timeout = MonorailGame.MOUSE_TIMEOUT
                else:
                    self.mouse_timeout -= 1
            elif self.state == MonorailGame.STATE_STATS:
                if self.hud.is_ready():
                    if not self.game_data.is_single_player():
                        self.game_data.add_total_scores( self.playfield )
                        self.hud.start_total_screen()
                        self.state = MonorailGame.STATE_TOTAL
                    else:
                        if self.scenario.has_won():
                            self.state = MonorailGame.STATE_DONE
                        else:
                            # Lost in single player: retry the same level.
                            self.restart( self.game_data )
                            return
            elif self.state == MonorailGame.STATE_TOTAL:
                if self.hud.is_ready():
                    self.state = MonorailGame.STATE_DONE
            elif self.state == MonorailGame.STATE_MENU:
                pass

            self.hud.game_tick( indev )
            self.music_man.game_tick()

            SingleSwitch.tick( indev, None )

            # Open the in-game menu on Escape / HUD button / single-switch.
            if indev.key.went_down( K_ESCAPE ) or \
               self.hud.menu_btn.went_down() or \
               SingleSwitch.esc_went_down:
                resman.get("gui.paper_sound").play()
                self.ingame_menu = IngameMenu(self.game_data.is_single_player(), self.game_data)

        elif self.ingame_menu is not None: # Ingame Menu
            SingleSwitch.feed_keys( indev )

            self.gui_state.update( indev, self.ingame_menu )
            self.ingame_menu.tick( indev, self.gui_state )

            if self.ingame_menu.is_done():
                if self.ingame_menu.to_menu:
                    self.music_man.stop()
                    self.state = MonorailGame.STATE_MENU
                elif self.ingame_menu.should_quit:
                    self.music_man.stop()
                    self.state = MonorailGame.STATE_QUIT
                elif self.ingame_menu.to_next_level:
                    self.music_man.stop()
                    self.state = MonorailGame.STATE_DONE
                self.ingame_menu = None
                self.mouse_timeout = MonorailGame.MOUSE_TIMEOUT

#        if indev.key.went_down( K_p ):
#            self.is_paused = not self.is_paused

        event.Event.update()

        # for debugging
        if self.is_paused:
            self.controller.game_tick( indev )

    def draw( self, surface, interpol, time_sec ):
        #surface.fill( (0,0,0) )
        frame = Frame( surface, time_sec, interpol )

        # No motion interpolation while menus/pause are up or outside the
        # live game states.
        if self.ingame_menu is not None or self.is_paused or\
           self.state not in [MonorailGame.STATE_BEGIN, MonorailGame.STATE_GAME]:
            frame.interpol = 0.0

        frame.draw( self.playfield )
        frame.draw( self.controller )

        self.hud.draw( frame )

        if self.ingame_menu is not None:
            self.ingame_menu.draw( surface )

        frame.draw( event.Event.instance )

    def draw_mouse( self, surface, interpol, time_sec ):
        # Cursor auto-hides after MOUSE_TIMEOUT ticks without movement.
        if self.mouse_timeout > 0:
            x, y = pygame.mouse.get_pos()
            resman.get("gui_surf").draw( surface, Vec2D(x, y), (0,0,32,32) )

    def mouse_down( self, button ):
        pass

    def is_done( self ):
        # Any terminal state counts as done for the main-app transition.
        return self.state == MonorailGame.STATE_DONE \
               or self.state == MonorailGame.STATE_MENU \
               or self.state == MonorailGame.STATE_QUIT


class MonorailEditor:
    """Built-in level editor (reached via the F5+F8+E debug chord)."""

    # Tile-brush modes selected with the mouse wheel.
    FLAT, NORTH_SLOPE, EAST_SLOPE, SOUTH_SLOPE, WEST_SLOPE, ENTERANCE,\
    ERASE, MAX = range( 8 )

    # Pixel offset of the playfield within the editor view.
    X_OFFSET, Y_OFFSET = 20, 300

    def __init__( self, level_nr ):
        self.level_nr = level_nr
        self.load_level()

        self.current_tile = MonorailEditor.FLAT
        self.update_edit_tiles()

    def load_level( self ):
        # Start from an empty Level when the file doesn't exist yet.
        self.level = Level()
        if os.path.exists( Level.get_filename( self.level_nr ) ):
            self.level.load( Level.get_filename( self.level_nr ) )

    def save_all( self ):
        self.level.save( Level.get_filename( self.level_nr ) )

    def do_tick( self, indev ):
        # update_tiles / update_edit_tiles are defined outside this view —
        # presumably they refresh self.edit_tile1/edit_tile2 from the mouse.
        self.update_tiles()
        self.update_edit_tiles()

        if pygame.mouse.get_pressed()[0]: # Left mouse button
            if self.current_tile in [MonorailEditor.FLAT, MonorailEditor.ENTERANCE]:
                self.level.set_tile( self.edit_tile1 )
            elif self.current_tile in [MonorailEditor.NORTH_SLOPE, MonorailEditor.EAST_SLOPE,
                                       MonorailEditor.SOUTH_SLOPE, MonorailEditor.WEST_SLOPE]:
                # Slopes occupy two tiles.
                self.level.set_tile( self.edit_tile1 )
                self.level.set_tile( self.edit_tile2 )
            elif self.current_tile == MonorailEditor.ERASE:
                self.level.remove_tile( self.edit_tile1.pos.x, self.edit_tile1.pos.y )

        # PageUp/PageDown: save, then switch to the previous/next level.
        if indev.key.went_down( K_PAGEUP ):
            if self.level_nr > 0:
                self.save_all()
                self.level_nr -= 1
                self.load_level()
        elif indev.key.went_down( K_PAGEDOWN ):
            self.save_all()
            self.level_nr += 1
            self.load_level()

        # Arrow keys shift the whole level by one tile.
        if indev.key.went_down( K_UP ):
            for tile in self.level.tiles:
                tile.pos.y -= 1
        if indev.key.went_down( K_DOWN ):
            for tile in self.level.tiles:
                tile.pos.y += 1
        if indev.key.went_down( K_LEFT ):
            for tile in self.level.tiles:
                tile.pos.x -= 1
        if indev.key.went_down( K_RIGHT ):
            for tile in self.level.tiles:
                tile.pos.x += 1

    def draw( self, surface, interpol, time_sec ):
        surface.fill( (50,50,50) )
        #resman.get("game.hud_left_surf").draw( surface, Vec2D(0,0) )

        frame = Frame( surface, time_sec, interpol )
        frame.X_OFFSET = MonorailEditor.X_OFFSET
        frame.Y_OFFSET = MonorailEditor.Y_OFFSET

        frame.draw_z( [self.level] )

        # Preview the brush under the cursor.
        if self.current_tile in [MonorailEditor.FLAT, MonorailEditor.ENTERANCE]:
            frame.draw( self.edit_tile1 )
        elif self.current_tile in [MonorailEditor.NORTH_SLOPE ,MonorailEditor.EAST_SLOPE,
                                   MonorailEditor.SOUTH_SLOPE, MonorailEditor.WEST_SLOPE]:
            frame.draw( self.edit_tile1 )
            frame.draw( self.edit_tile2 )
        elif self.current_tile == MonorailEditor.ERASE:
            pass

        # draw filename
        font = pygame.font.Font( None, 24 )
        render_text = font.render( Level.get_filename( self.level_nr ), 0, (255,255,255) )
        surface.blit( render_text, (100,10) )

    def draw_mouse( self, surface, interpol, time_sec ):
        x, y = pygame.mouse.get_pos()
        resman.get("gui_surf").draw( surface, Vec2D(x, y), (0,0,32,32) )

    def mouse_down( self, button ):
        # Mouse wheel cycles through the brush modes.
        if button == 4: # Wheel up
            self.current_tile = (self.current_tile+MonorailEditor.MAX-1) % MonorailEditor.MAX
        if button == 5: # Wheel down
            self.current_tile = (self.current_tile+1) % MonorailEditor.MAX
        self.update_edit_tiles()
'vm_state', 'instance_type_id', 'deleted']
    # NOTE(review): the lines above continue a function whose definition
    # starts before this chunk (exact-match column list for instance
    # filtering); left untouched.

    query_filters = [key for key in filters.iterkeys()
                     if key in exact_match_filter_names]

    for filter_name in query_filters:
        # Do the matching and remove the filter from the dictionary
        # so we don't try it again below..
        query_prefix = _exact_match_filter(query_prefix, filter_name,
                                           filters.pop(filter_name))

    instances = query_prefix.all()

    if not instances:
        return []

    # Now filter on everything else for regexp matching..
    # For filters not in the list, we'll attempt to use the filter_name
    # as a column name in Instance..
    regexp_filter_funcs = {'ip6': _regexp_filter_by_ipv6,
                           'ip': _regexp_filter_by_ip}

    # NOTE(review): the lambdas close over the loop variables, which is
    # safe only because Python 2's builtin filter() applies them eagerly
    # each iteration; under Python 3's lazy filter() the late-binding
    # capture of filter_name/filter_re would break this loop.
    for filter_name in filters.iterkeys():
        filter_func = regexp_filter_funcs.get(filter_name, None)
        filter_re = re.compile(str(filters[filter_name]))
        if filter_func:
            filter_l = lambda instance: filter_func(instance, filter_re)
        elif filter_name == 'metadata':
            filter_l = lambda instance: _regexp_filter_by_metadata(instance,
                                                filters[filter_name])
        else:
            filter_l = lambda instance: _regexp_filter_by_column(instance,
                                                filter_name, filter_re)
        instances = filter(filter_l, instances)

    return instances


@require_context
def instance_get_active_by_window(context, begin, end=None, project_id=None):
    """Return instances that were continuously active over window."""
    # "Continuously active": launched before the window began and not
    # terminated until after it ended (or not terminated at all).
    session = get_session()
    query = session.query(models.Instance).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None):
    """Return instances and joins that were continuously active over window."""
    # Same window semantics as instance_get_active_by_window, but with the
    # related rows eager-loaded to avoid per-instance queries.
    session = get_session()
    query = session.query(models.Instance).\
                    options(joinedload_all('fixed_ips.floating_ips')).\
                    options(joinedload('security_groups')).\
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('instance_type')).\
                    filter(models.Instance.launched_at < begin)
    if end:
        query = query.filter(or_(models.Instance.terminated_at == None,
                                 models.Instance.terminated_at > end))
    else:
        query = query.filter(models.Instance.terminated_at == None)
    if project_id:
        query = query.filter_by(project_id=project_id)
    return query.all()


@require_admin_context
def instance_get_all_by_user(context, user_id):
    """Return all instances owned by the given user (admin only)."""
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(deleted=can_read_deleted(context)).\
                   filter_by(user_id=user_id).\
                   all()


@require_admin_context
def instance_get_all_by_host(context, host):
    """Return all instances scheduled on the given host (admin only)."""
    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(host=host).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_project(context, project_id):
    """Return all instances in the given project the caller may see."""
    authorize_project_context(context, project_id)

    session = get_session()
    return session.query(models.Instance).\
                   options(joinedload_all('fixed_ips.floating_ips')).\
                   options(joinedload('virtual_interfaces')).\
                   options(joinedload('security_groups')).\
                   options(joinedload_all('fixed_ips.network')).\
                   options(joinedload('metadata')).\
                   options(joinedload('instance_type')).\
                   filter_by(project_id=project_id).\
                   filter_by(deleted=can_read_deleted(context)).\
                   all()


@require_context
def instance_get_all_by_reservation(context, reservation_id):
    """Return instances belonging to the given reservation id."""
    session = get_session()
query = session.query(models.Instance).\ filter_by(reservation_id=reservation_id).\ options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')) if is_admin_context(context): return query.\ filter_by(deleted=can_read_deleted(context)).\ all() elif is_user_context(context): return query.\ filter_by(project_id=context.project_id).\ filter_by(deleted=False).\ all() @require_context def instance_get_by_fixed_ip(context, address): """Return instance ref by exact match of FixedIP""" fixed_ip_ref = fixed_ip_get_by_address(context, address) return fixed_ip_ref.instance @require_context def instance_get_by_fixed_ipv6(context, address): """Return instance ref by exact match of IPv6""" session = get_session() # convert IPv6 address to mac mac = ipv6.to_mac(address) # get virtual interface vif_ref = virtual_interface_get_by_address(context, mac) # look up instance based on instance_id from vif row result = session.query(models.Instance).\ filter_by(id=vif_ref['instance_id']) return result @require_admin_context def instance_get_project_vpn(context, project_id): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ filter_by(image_ref=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() @require_context def instance_get_fixed_addresses(context, instance_id): session = get_session() with session.begin(): instance_ref = instance_get(context, instance_id, session=session) try: fixed_ips = fixed_ip_get_by_instance(context, instance_id) except exception.NotFound: 
return [] return [fixed_ip.address for fixed_ip in fixed_ips] @require_context def instance_get_fixed_addresses_v6(context, instance_id): session = get_session() with session.begin(): # get instance instance_ref = instance_get(context, instance_id, session=session) # assume instance has 1 mac for each network associated with it # get networks associated with instance network_refs = network_get_all_by_instance(context, instance_id) # compile a list of cidr_v6 prefixes sorted by network id prefixes = [ref.cidr_v6 for ref in sorted(network_refs, key=lambda ref: ref.id)] # get vifs associated with instance vif_refs = virtual_interface_get_by_instance(context, instance_ref.id) # compile list of the mac_addresses for vifs sorted by network id macs = [vif_ref['address'] for vif_ref in sorted(vif_refs, key=lambda vif_ref: vif_ref['network_id'])] # get project id from instance project_id = instance_ref.project_id # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs]) # return list containing ipv6 address for each tuple return [ipv6.to_global(*t) for t in prefix_mac_tuples] @require_context def instance_get_floating_address(context, instance_id): fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id) if not fixed_ip_refs: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips if not fixed_ip_refs[0].floating_ips: return None # NOTE(vish): this just returns the first floating ip return fixed_ip_refs[0].floating_ips[0]['address'] @require_context def instance_update(context, instance_id, values): session = get_session() metadata = values.get('metadata') if metadata is not None: instance_metadata_update(context, instance_id, values.pop('metadata'), delete=True) with session.begin(): if utils.is_uuid_like(instance_id): instance_ref = instance_get_by_uuid(context, instance_id, session=session) else: instance_ref = 
instance_get(context, instance_id, session=session) instance_ref.update(values) instance_ref.save(session=session) return instance_ref def instance_add_security_group(context, instance_id, security_group_id): """Associate the given security group with the given instance""" session = get_session() with session.begin(): instance_ref = instance_get(context, instance_id, session=session) security_group_ref = security_group_get(context, security_group_id, session=session) instance_ref.security_groups += [security_group_ref] instance_ref.save(session=session) @require_context def instance_remove_security_group(context, instance_id, security_group_id): """Disassociate the given security group from the given instance""" session = get_session() session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ filter_by(security_group_id=security_group_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() action_ref.update(values) session = get_session() with session.begin(): action_ref.save(session=session) return action_ref @require_admin_context def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() if utils.is_uuid_like(instance_id): instance = instance_get_by_uuid(context, instance_id, session) instance_id = instance.id return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ all() ################### @require_context def key_pair_create(context, values): key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref @require_context def key_pair_destroy(context, user_id, name): authorize_user_context(context, user_id) session = get_session() with session.begin(): key_pair_ref = 
key_pair_get(context, user_id, name, session=session) key_pair_ref.delete(session=session) @require_context def key_pair_destroy_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() with session.begin(): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': True, 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def key_pair_get(context, user_id, name, session=None): authorize_user_context(context, user_id) if not session: session = get_session() result = session.query(models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() return session.query(models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(deleted=False).\ all() ################### @require_admin_context def network_associate(context, project_id, force=False): """Associate a project with a network. 
called by project_get_networks under certain conditions and network manager add_network_to_project() only associate if the project doesn't already have a network or if force is True force solves race condition where a fresh project has multiple instance builds simultaneosly picked up by multiple network hosts which attempt to associate the project with multiple networks force should only be used as a direct consequence of user request all automated requests should not use force """ session = get_session() with session.begin(): def network_query(project_filter): return session.query(models.Network).\ filter_by(deleted=False).\ filter_by(project_id=project_filter).\ with_lockmode('update').\ first() if not force: # find out if project has a network network_ref = network_query(project_id) if force or not network_ref: # in force mode or project doesn't have a network so assocaite # with a new network # get new network network_ref = network_query(None) if not network_ref: raise db.NoMoreNetworks() # associate with network # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues network_ref['project_id'] = project_id session.add(network_ref) return network_ref @require_admin_context def network_count(context): session = get_session() return session.query(models.Network).\ filter_by(deleted=can_read_deleted(context)).\ count() @require_admin_context def network_count_allocated_ips(context, network_id): session = get_session() return session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(allocated=True).\ filter_by(deleted=False).\ count() @require_admin_context def network_count_available_ips(context, network_id): session = get_session() return session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(allocated=False).\ filter_by(reserved=False).\ filter_by(deleted=False).\ count() @require_admin_context def network_count_reserved_ips(context, network_id): session = get_session() return 
session.query(models.FixedIp).\ filter_by(network_id=network_id).\ filter_by(reserved=True).\ filter_by(deleted=False).\ count() @require_admin_context def network_create_safe(context, values): network_ref = models.Network() network_ref.update(values) try: network_ref.save() return network_ref except IntegrityError: return None @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): network_ref = network_get(context, network_id=network_id, \ session=session) session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id): network_update(context, network_id, {'project_id': None, 'host': None}) @require_admin_context def network_disassociate_all(context): session = get_session() session.query(models.Network).\ update({'project_id': None, 'updated_at': literal_column('updated_at')}) @require_context def network_get(context, network_id, session=None): if not session: session = get_session() result = None if is_admin_context(context): result = session.query(models.Network).\ filter_by(id=network_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Network).\ filter_by(project_id=context.project_id).\ filter_by(id=network_id).\ filter_by(deleted=False).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_admin_context def network_get_all(context): session = get_session() result = session.query(models.Network).\ filter_by(deleted=False).all() if not result: raise exception.NoNetworksFound() return result @require_admin_context def network_get_all_by_uuids(context, network_uuids, project_id=None): session = get_session() project_or_none = or_(models.Network.project_id == project_id, models.Network.project_id == None) result = session.query(models.Network).\ filter(models.Network.uuid.in_(network_uuids)).\ filter(project_or_none).\ filter_by(deleted=False).all() if 
not result: raise exception.NoNetworksFound() #check if host is set to all of the networks # returned in the result for network in result: if network['host'] is None: raise exception.NetworkHostNotSet(network_id=network['id']) #check if the result contains all the networks #we are looking for for network_uuid in network_uuids: found = False for network in result: if network['uuid'] == network_uuid: found = True break if not found: if project_id: raise exception.NetworkNotFoundForProject(network_uuid=uuid, project_id=context.project_id) raise exception.NetworkNotFound(network_id=network_uuid) return result # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable=C0103 @require_admin_context def network_get_associated_fixed_ips(context, network_id): session = get_session() return session.query(models.FixedIp).\ options(joinedload_all('instance')).\ filter_by(network_id=network_id).\ filter(models.FixedIp.instance_id != None).\ filter(models.FixedIp.virtual_interface_id != None).\ filter_by(deleted=False).\ all() @require_admin_context def network_get_by_bridge(context, bridge): session = get_session() result = session.query(models.Network).\ filter_by(bridge=bridge).\ filter_by(deleted=False).\ first() if not result: raise exception.NetworkNotFoundForBridge(bridge=bridge) return result @require_admin_context def network_get_by_uuid(context, uuid): session = get_session() result = session.query(models.Network).\ filter_by(uuid=uuid).\ filter_by(deleted=False).\ first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return
polling_interval
        # (the fragment above completes `self.polling_interval = ...` from
        # the portion of __init__ that precedes this chunk)
        self.inc_script_names = inc_script_names
        self._script_name_counter = 0

        self._shutdown_lock = threading.Lock()
        self._shutdown_thread = False

        if client is None:
            client = APIClient()
        self.client = client

        # A list of ContainerFuture objects for submitted jobs.
        self._futures = []

    def _make_future(self, job_id, run_id):
        """Instantiates a :class:`~civis.futures.ContainerFuture`,
        adds it to the internal list of futures, and returns it.
        This is a helper method for :func:`submit`.
        """
        future = ContainerFuture(job_id, run_id,
                                 polling_interval=self.polling_interval,
                                 max_n_retries=self.max_n_retries,
                                 client=self.client,
                                 poll_on_creation=False)

        self._futures.append(future)

        # Return a ContainerFuture object with the job ID.
        return future

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        This creates a container script with the command
        determined by the arguments.

        Parameters
        ----------
        fn: str or callable
            If this is a callable, it ``fn(*args, **kwargs)`` should return
            a ``str`` for the command to run in docker.  If ``None``, then
            ``_create_docker_command`` will be used.
        *args: args
            Additional arguments passed to ``fn``.
        arguments: dict, optional
            If provided, the created script will use the
            `arguments` dictionary from the class initialization updated with
            the dictionary provided to `submit`.
        **kwargs: kwargs
            Additional keyword arguments passed to ``fn``.

        Returns
        -------
        :class:`~civis.futures.ContainerFuture`
            Note that the ``Future`` returned by ``submit`` will provide
            the final status of your Container Script as its ``.result()``.
            The user is responsible for downloading outputs produced by the
            script, if any.
        """
        # Propagate this process's job/run identity to the child script.
        arguments = kwargs.pop('arguments', {})
        arguments.update({'CIVIS_PARENT_JOB_ID': os.getenv('CIVIS_JOB_ID'),
                          'CIVIS_PARENT_RUN_ID': os.getenv('CIVIS_RUN_ID')})
        with self._shutdown_lock:
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new '
                                   'futures after shutdown')

            # A string is used verbatim as the docker command; otherwise
            # fn (or the default command builder) produces it.
            if isinstance(fn, six.string_types):
                cmd = fn
            else:
                if fn is None:
                    fn = _create_docker_command
                cmd = fn(*args, **kwargs)

            name = self.name
            if self.inc_script_names:
                name = "{} {}".format(name, self._script_name_counter)
                self._script_name_counter += 1

            job = self._create_job(name=name,
                                   arguments=arguments,
                                   cmd=cmd)
            run = self.client.jobs.post_runs(job.id)
            log.debug('Container "{}" created with script ID {} and '
                      'run ID {}'.format(name, job.id, run.id))

            return self._make_future(job.id, run.id)

    @abstractmethod
    def _create_job(self, name, arguments=None, cmd=None):
        raise NotImplementedError("Implement in the child class")

    def shutdown(self, wait=True):
        """Wait until all Civis jobs started by this are in done states

        Parameters
        ----------
        wait: bool
            If ``True``, then this will wait until all jobs are in a
            done (i.e., finished or cancelled) state.
        """
        with self._shutdown_lock:
            self._shutdown_thread = True

        if wait:
            futures.wait(self._futures)

    def cancel_all(self):
        """Send cancel requests for all running Civis jobs"""
        for f in self._futures:
            # The ContainerFuture is smart enough to only cancel the run
            # if the run is still in progress.
            f.cancel()


class _ContainerShellExecutor(_CivisExecutor):
    """Parallel computation with Container Scripts in the Civis Platform

    Create and run new Container Scripts in the Civis Platform.
    A Container Script is a command which runs inside a Docker container
    on the Civis Platform. Containers launched by the Executor may
    have either different shell commands or different arguments.

    This class follows the implementations in :ref:`concurrent.futures`,
    with necessary changes for parallelizing over different Container
    Script inputs rather than over functions.

    Jobs created through this executor will have environment variables
    "CIVIS_PARENT_JOB_ID" and "CIVIS_PARENT_RUN_ID" with the contents of
    the "CIVIS_JOB_ID" and "CIVIS_RUN_ID" of the environment which
    created them. If the code doesn't have "CIVIS_JOB_ID" and
    "CIVIS_RUN_ID" environment variables available, the child will not
    have "CIVIS_PARENT_JOB_ID" and "CIVIS_PARENT_RUN_ID" environment
    variables.

    .. note:: If you expect to run a large number of jobs, you may
              wish to set automatic retries of failed jobs
              (via `max_n_retries`) to protect against network and
              infrastructure failures. Be careful with this if your
              jobs cause side effects other than returning a result;
              retries may cause any operations executed by your jobs
              to be run twice.

    Parameters
    ----------
    docker_image_name: str, optional
        The name of the Docker image to be used by Civis. You may also
        wish to specify a ``docker_image_tag`` in the keyword arguments.
    name: str, optional
        The name for containers in Civis.
        Defaults to "ContainerShellExecutorScript" followed by the date.
    required_resources: dict, optional
        A dictionary specifying what resources the job needs.
        See :func:`~APIClient.scripts.post_containers` for details.
        Defaults to 1 CPU and 1 GiB of RAM.
    hidden: bool, optional
        The hidden status of the object. Setting this to ``True`` hides it
        from most API endpoints. The object can still be queried
        directly by ID. Defaults to ``True``.
    max_n_retries: int, optional
        Retry failed jobs this many times before giving up.
        Retried jobs will be restarted with exactly the same parameters
        as they used the first time; only use this if you expect that your
        code is functional and errors would come from e.g. network problems.
    client: APIClient, optional
        The :class:`~civis.APIClient` object to use for interacting with the
        API. If not specified, a new one will be instantiated.
    polling_interval: int or float, optional
        The number of seconds between API requests to check whether a result
        is ready.  This will be passed to the
        :class:`~ContainerFuture` objects that are created. You should
        only set this if you aren't using ``pubnub`` notifications.
    inc_script_names: bool, optional
        If ``True``, a counter will be added to the ``name`` to create
        the script names for each submission.
    **kwargs:
        Additional keyword arguments will be passed
        directly to :func:`~civis.APIClient.scripts.post_containers`.

    See Also
    --------
    civis.APIClient.scripts.post_containers
    """
    def __init__(self, docker_image_name="civisanalytics/datascience-python",
                 name=None,
                 required_resources=None,
                 hidden=True,
                 max_n_retries=0,
                 client=None,
                 polling_interval=None,
                 inc_script_names=False,
                 **kwargs):
        self.docker_image_name = docker_image_name
        self.container_kwargs = kwargs

        # Expose the parent job/run identity to every child as script
        # parameters (values may be None when run outside Civis).
        params = [{'name': 'CIVIS_PARENT_JOB_ID',
                   'type': 'integer',
                   'value': os.getenv('CIVIS_JOB_ID')},
                  {'name': 'CIVIS_PARENT_RUN_ID',
                   'type': 'integer',
                   'value': os.getenv('CIVIS_RUN_ID')}]
        self.container_kwargs.setdefault('params', []).extend(params)

        if required_resources is None:
            required_resources = {'cpu': 1024, 'memory': 1024}
        self.required_resources = required_resources

        if name is None:
            date_str = datetime.datetime.today().strftime("%Y-%m-%d")
            name = "ContainerShellExecutorScript {}".format(date_str)

        super().__init__(name=name,
                         hidden=hidden,
                         client=client,
                         max_n_retries=max_n_retries,
                         polling_interval=polling_interval,
                         inc_script_names=inc_script_names)

    def _create_job(self, name, arguments=None, cmd=None):
        # Combine instance and input arguments into one dictionary.
        # Use `None` instead of an empty dictionary.
        kwargs = copy.deepcopy(self.container_kwargs)
        kwargs.setdefault('arguments', {}).update(arguments or {})
        if not kwargs['arguments']:
            del kwargs['arguments']

        # Submit a request to Civis to make the container script object.
job = self.client.scripts.post_containers( name=name, required_resources=self.required_resources, docker_command=cmd, docker_image_name=self.docker_image_name, hidden=self.hidden, **self.container_kwargs ) return job class CustomScriptExecutor(_CivisExecutor): """Manage a pool of Custom Scripts in the Civis Platform Each Custom Script will be created from the same template, but may use different arguments. This class follows the implementations in :ref:`concurrent.futures`. If your template has settable parameters "CIVIS_PARENT_JOB_ID" and "CIVIS_PARENT_RUN_ID", then this executor will fill them with the contents of the "CIVIS_JOB_ID" and "CIVIS_RUN_ID" of the environment which created them. If the code doesn't have "CIVIS_JOB_ID" and "CIVIS_RUN_ID" environment variables available, the child will not have "CIVIS_PARENT_JOB_ID" and "CIVIS_PARENT_RUN_ID" environment variables. .. note:: If you expect to run a large number of jobs, you may wish to set automatic retries of failed jobs (via `max_n_retries`) to protect against network and infrastructure failures. Be careful with this if your jobs cause side effects other than returning a result; retries may cause any operations executed by your jobs to be run twice. Parameters ---------- from_template_id: int Create jobs as Custom Scripts from the given template ID. name: str The name for containers in Civis. hidden: bool, optional The hidden status of the object. Setting this to ``True`` hides it from most API endpoints. The object can still be queried directly by ID. Defaults to ``True``. arguments: dict, optional See :func:`~civis.APIClient.scripts.post_containers` for details. max_n_retries: int, optional Retry failed jobs this many times before giving up. Retried jobs will be restarted with exactly the same parameters as they used the first time; only use this if you expect that your code is functional and errors would come from e.g. network problems. 
client: APIClient, optional The :class:`~civis.APIClient` object to use for interacting with the API. If not specified, a new one will be instantiated. polling_interval: int or float, optional The number of seconds between API requests to check whether a result is ready. This will be passed to the :class:`~ContainerFuture` objects that are created. You should only set this if you aren't using ``pubnub`` notifications. inc_script_names: bool, optional If ``True``, a counter will be added to the ``name`` to create the script names for each submission. See Also -------- civis.APIClient.scripts.post_custom """ def __init__(self, from_template_id, name=None, hidden=True, arguments=None, max_n_retries=0, client=None, polling_interval=None, inc_script_names=False): self.from_template_id = from_template_id self.arguments = arguments
= el else: local_sum = self.py.add(local_sum, el) enc_b = self.get_map(b[x]) ts = self.py.add_plain(local_sum, enc_b, True) ts = self.py.square(ts) out_name = out_folder + "/square_"+str(x) ts.save(out_name) if self.verbosity: perc = int(((x+1)/w.shape[1]) * 100) print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")") stop = timeit.default_timer() if self.verbosity: print("First Dense: output processed in " + str(stop-start) + " s.") print("") # ========================================================================= # SECOND DENSE LAYER # ------------------------------------------------------------------------- # It is computed given the output files from first dense layer and the # weights (filters) and biases preprocessed from the model # ========================================================================= def dense2(self): if self.verbosity: print("Computing Second Dense (square)") print("==================================") input_folder = self.enclayers_dir + "/dense1/output" dense_folder = self.enclayers_dir + "/dense2" out_folder = dense_folder + "/output" wfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_3_dense_10.npy" bfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_bias_3_dense_10.npy" if not path.exists(dense_folder): createDir(dense_folder) if path.exists(out_folder): print("Processed before. You can found it in " + out_folder + " folder.") print("") elif not path.exists(wfile) or not path.exists(bfile): raise Exception("Second dense layer weights and biases need to be preprocessed before (with precision "+ str(self.precision)+ ").") elif not path.exists(input_folder): raise Exception("First dense output required. Please run Encryption.dense1(...) 
before.") else: createDir(out_folder) w = np.load(wfile) b = np.load(bfile) if w.shape[1] != b.shape[0]: raise Exception("Preprocessed weights "+ str(w.shape) +" and biases "+ str(b.shape) + "are incopatible.") if self.verbosity: print("Second Dense: output processing...") print("0%") start = timeit.default_timer() for x in range(w.shape[1]): local_sum = None for i in range(w.shape[0]): fname = input_folder + "/square_" + str(i) p = PyCtxt() p.load(fname,'batch') encw = self.get_map(w[i][x]) el = self.py.multiply_plain(p, encw, True) if(local_sum == None): local_sum = el else: local_sum = self.py.add(local_sum, el) enc_b = self.get_map(b[x]) ts = self.py.add_plain(local_sum, enc_b, True) ts = self.py.square(ts) out_name = out_folder + "/square_"+str(x) ts.save(out_name) if self.verbosity: perc = int(((x+1)/w.shape[1]) * 100) print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")") stop = timeit.default_timer() if self.verbosity: print("Second Dense: output processed in " + str(stop-start) + " s.") print("") # ========================================================================= # FULLY CONNECTED LAYER # ------------------------------------------------------------------------- # It is computed given the output files from the second dense layer and the # weights (filters) and biases preprocessed from the model # ========================================================================= def fully_connected(self): if self.verbosity: print("Computing Fully Connected") print("==================================") input_folder = self.enclayers_dir + "/dense2/output" fc_folder = self.enclayers_dir + "/fullyconnected" out_folder = fc_folder + "/output" wfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_4_dense_11.npy" bfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_bias_4_dense_11.npy" if not path.exists(fc_folder): createDir(fc_folder) if path.exists(out_folder): print("Processed before. 
You can found it in " + out_folder + " folder.") print("") elif not path.exists(wfile) or not path.exists(bfile): raise Exception("Fully connected layer weights and biases need to be preprocessed before (with precision "+ str(self.precision)+ ").") elif not path.exists(input_folder): raise Exception("Second dense output required. Please run Encryption.dense2(...) before.") else: createDir(out_folder) w = np.load(wfile) b = np.load(bfile) if w.shape[1] != b.shape[0]: raise Exception("Preprocessed weights "+ str(w.shape) +" and biases "+ str(b.shape) + "are incopatible.") if self.verbosity: print("Fully Connected: output processing...") print("0%") start = timeit.default_timer() for x in range(w.shape[1]): local_sum = None for i in range(w.shape[0]): fname = input_folder + "/square_" + str(i) p = PyCtxt() p.load(fname,'batch') encw = self.get_map(w[i][x]) el = self.py.multiply_plain(p, encw, True) if(local_sum == None): local_sum = el else: local_sum = self.py.add(local_sum, el) enc_b = self.get_map(b[x]) ts = self.py.add_plain(local_sum, enc_b, True) out_name = out_folder + "/fc_"+str(x) ts.save(out_name) if self.verbosity: perc = int(((x+1)/w.shape[1]) * 100) print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")") stop = timeit.default_timer() if self.verbosity: print("Fully Connected: output processed in " + str(stop-start) + " s.") print("") def get_results(self, test_labels): dense_folder = self.enclayers_dir + "/fullyconnected" out_folder = dense_folder + "/output" el = [] for i in range(test_labels.shape[1]): file = out_folder + "/fc_"+str(i) p = PyCtxt() p.load(file,'batch') ptxt = self.py.decrypt(p) ptxt = self.py.decodeBatch(ptxt) if(el.__len__() <= i): el.append([]) for j in range(ptxt.__len__()): if(el.__len__() <= j): el.append([ptxt[j]]) else: el[j].append(ptxt[j]) return np.array(el) def predict(self, test_labels): if self.verbosity: print("Computing Prediction") print("==================================") fc_folder = self.enclayers_dir + 
"/fullyconnected" out_folder = fc_folder + "/output" if not path.exists(out_folder): raise Exception("You need to compute the fully connected layer before.") print(test_labels[0]) # Only q predictions are done simultaneously # for i in range(self.n) el = [] start = timeit.default_timer() for i in range(test_labels.shape[1]): file = out_folder + "/fc_"+str(i) p = PyCtxt() p.load(file,'batch') ptxt = self.py.decrypt(p) ptxt = self.py.decodeBatch(ptxt) ptxt = self.decode_tensor(ptxt, self.t, self.precision) if(el.__len__() <= i): el.append([]) for j in range(ptxt.__len__()): if(el.__len__() <= j): el.append([ptxt[j]]) else: el[j].append(ptxt[j]) el = np.array(el) print(el.shape) print(el[0]) pos = 0 for i in range(el.shape[0]): mp = np.argmax(el[i]) ml = np.argmax(test_labels[i]) if(mp == ml): pos+=1 stop = timeit.default_timer() print("Computation time: " + str(stop-start) + " s.") print("Positive prediction: " + str(pos)) print("Negative prediction: " + str(self.n - pos)) acc = (pos/self.n) * 100 print("Model Accurancy:" + str(acc) + "%") def _encode_(self, to_encode, t, precision): """ Check encode for the given value: Admitted intervals: + : [0, t/2] - : [(t/2)+1, t] ==> [-((t/2)+1), 0] Ex: positive: [0,982384640] ==> [0,982384640] ==> [0, t/2] negative: [-982384640, 0] ==> [982384641, 1964769281] ==> [(t/2)+1, t] """ precision = pow(10, precision) val = round((to_encode * precision)) t2 = t/2 if val < 0: minval = -(t2+1) if val < minval: raise Exception("The value to encode (" + str(val) + ") is smaller than -((t/2)+1) = " + str(minval)) else: return (t + val) else: if val > t2: raise Exception("The value to encode (" + str(val) + ") is larger than t/2 = " + str(t2)) else: return val def _decode_(self, to_decode, t, precision): """ Decode the value encoded with _encode_ """ t2 = t/2 if to_decode > t2: return (to_decode-t) / pow(10, precision) else: return to_decode / pow(10, precision) def decode_tensor(self, tensor, t, precision): ret = [] for i in 
range(tensor.__len__()): ret.append(self._decode_(tensor[i], t, precision)) return np.array(ret) def encrypt_input(self, get_result = False): """ Encrypt the input layer generating one file per encrypted pixel index """ pre_input_file = self.preprocess_dir + "precision_" + str(self.precision) + "/pre_input.npy" if not path.exists(pre_input_file): raise Exception("Preprocessed input not found in " + pre_input_file + " please run Encryption.preprocess before.") input_folder = self.enclayers_dir + "/input" if path.exists(input_folder): print("Input layer encrypted before. You can found it in: " + input_folder) if not get_result: return None createDir(input_folder) pre_input = np.load(self.preprocess_dir + "precision_" + str(self.precision) + "/pre_input.npy") if self.verbosity: print("") print("Encrypting (preprocessed) input layer with shape " + str(pre_input.shape)+"...") input_dim, dim, dim1 = pre_input.shape pre_flat = pre_input.flatten() arr = [] pixel_arr_dim = dim*dim1 for x in range(pre_flat.__len__()): if x < pixel_arr_dim: arr.append([pre_flat[x]]) else: arr[(x % pixel_arr_dim)].append(pre_flat[x]) arr = np.array(arr) enc = [] for i in range(arr.shape[0]): fname = input_folder+'/pixel_'+ str(i) + ".pyctxt" enc.append(self._enc_arr_(arr[i], fname)) if self.verbosity: print("Input layer encrypted with success in " + str(enc.__len__()) + " files (one per pixel)") return np.array(enc) def getEncryptedPixel(self, index): pixel_file = self.enclayers_dir + "/input/pixel_" + str(index) + ".pyctxt" p = PyCtxt() p.load(pixel_file,'batch') return p def _encode_arr_(self, arr): if not self.py.getflagBatch() : raise Exception("You need to initialize Batch for this context.") res = [] for x in range(self.n): res.append(arr[x]) res = np.array(res) encoded = self.py.encodeBatch(res) return encoded def _enc_arr_(self, arr, file_name = None): if not self.py.getflagBatch() : raise Exception("You need to initialize Batch for this context.") if file_name != None: if
<reponame>Sascha0912/SAIL import numpy as np import pandas as pd from sail.sobol2indx import sobol2indx from sail.sobol_lib import i4_sobol_generate from sail.initialSampling import initialSampling from sail.createPredictionMap import createPredictionMap from sail.getValidInds import getValidInds from gaussianProcess.trainGP import trainGP from domain.rastrigin.rastrigin_CreateAcqFunc import rastrigin_CreateAcqFunc from domain.rastrigin.rastrigin_PreciseEvaluate import rastrigin_PreciseEvaluate from domain.cube.cube_CreateAcqFunc import cube_CreateAcqFunc from domain.cube.cube_PreciseEvaluate import cube_PreciseEvaluate from domain.wheelcase.wheelcase_CreateAcqFunc import wheelcase_CreateAcqFunc # from domain.wheelcase.wheelcase_DummyPreciseEvaluate import wheelcase_DummyPreciseEvaluate from domain.wheelcase.wheelcase_PreciseEvaluate import wheelcase_PreciseEvaluate from mapElites.createMap import createMap from mapElites.nicheCompete import nicheCompete from mapElites.updateMap import updateMap from mapElites.mapElites import mapElites from visualization.viewMap import viewMap import time from pprint import pprint def sail(p,d): # domain and params # def scale(value): # # return (value - 0)/(1-0)*(0.2 - (-0.2)) + (-0.2) # DOMAINCHANGE # return (value - 0)/(1-0)*(4-0)+0 def scale1(value): return (value - 0)/(1-0)*(4-0)+0 # DOMAINCHANGE def scale2(value): return (value - 0)/(1-0)*(0.2-0)+0 # DOMAINCHANGE # SOBOL settings (adjust also in initialSampling) skip = 1000 seq_size = 20000 def feval(funcName,*args): return eval(funcName)(*args) # Produce initial samples if ~d.loadInitialSamples: # print("d") # pprint(vars(d)) # print("p") # pprint(vars(p)) # print("d") # print(d) # print("p.nInitlasmaples") # print(p.nInitialSamples) observation, value = initialSampling(d,p.nInitialSamples) print("DEBUG1: observation") print(observation) # print("DEBUG2: value") # print(value) else: np.load(d.initialSampleSource) # e.g. 
npz-File csv randomPick = np.random.permutation(observation.shape[0])[:p.initialSamples] # take only first "initialSamples" values observation = observation[randomPick,:] # get rows with indexes from randomPick value = value[randomPick,:] # same for value nSamples = observation.shape[0] # Acquisition loop trainingTime = [] illumTime = [] peTime = [] predMap = [] # print("value") # print(value) percImproved = pd.DataFrame() acqMapRecord = pd.DataFrame() confContribution = pd.DataFrame() gpModel = [] while nSamples <= p.nTotalSamples: # Create surrogate and acquisition function # Surrogate models are created from all evaluated samples, and these # models are used to produce acquisition function. print('PE ' + str(nSamples) + ' | Training Surrogate Models') tstart = time.time() # time calc # print("value") # print(value) # print("value.shape[1]: " + str(value.shape)) # print("d.gpParams.shape: " + str(np.shape(d.gpParams))) for iModel in range(0,value.shape[1]): # TODO: only first case relevant # only retrain model parameters every 'p.trainingMod' iterations # if (nSamples == p.nInitialSamples or np.remainder(nSamples, p.trainingMod * p.nAdditionalSamples)): gpModel.insert(iModel,trainGP(observation, value.loc[:,iModel], d.gpParams[iModel])) # print("Model") # print(gpModel[iModel]) # else: # gpModel.insert(iModel,trainGP(observation, value.loc[:,iModel], d.gpParams[iModel], functionEvals=0)) # pass # Save found model parameters and update acquisition function for iModel in range(0,value.shape[1]): gpModelDict = gpModel[iModel].to_dict() # print("gModelDict") # print(gpModelDict) d.gpParams[iModel].dict = gpModelDict # d.gpParams[iModel] = gpModel[iModel] # d.gpParams[iModel].hyp = gpModel[iModel].hyp # See pyGPs hyp # d.gpParams[iModel].k = gpModel[iModel].kernel # d.gpParams[iModel].meanfunc = gpModel[iModel].mean # d.gpParams[iModel].lik = gpModel[iModel].likelihood acqFunction = feval(d.createAcqFunction, gpModel, d) # Data Gathering (training Time) tEnd = 
time.time() trainingTime.append(tEnd - tstart) # time calc # Create intermediate prediction map for analysis if ~np.remainder(nSamples, p.data_mapEvalMod) and p.data_mapEval: print('PE: ' + str(nSamples) + ' | Illuminating Prediction Map') predMap[nSamples], x = createPredictionMap(gpModel, observation, p, d, 'featureRes', p.data_predMapRes, 'nGens', 2*p.nGens) # 2. Illuminate Acquisition Map # A map is constructed using the evaluated samples which are evaluated # with the acquisition function and placed in the map as the initial # population. The observed samples are the seed population of the # 'acquisition map' which is then created by optimizing the acquisition # function with MAP-Elites. if nSamples == p.nTotalSamples: break # After final model is created no more infill is necessary print('PE: ' + str(nSamples) + ' | Illuminating Acquisition Map') tstart = time.time() # Evaluate observation set with acquisition function # print("DEBUG3: observation") # print(observation) fitness, predValues = acqFunction(observation) # print("DEBUG4: fitness") # print(fitness) # print("DEBUG5: predValues") # print(predValues) # Place best samples in acquisition map obsMap = createMap(d.featureRes, d.dof, d.featureMin, d.featureMax, d.extraMapValues) # obsMap contains only nans # print("obsMap") # print(obsMap[0].genes) # print("observation") # print(observation) # print("fitness") # print(fitness) # print("DEBUG6: obsMap") # print(obsMap) # print("d") # print(d) replaced, replacement, x = nicheCompete(observation, fitness, obsMap, d) # print("DEBUG7: replaced") # print(replaced) # print("DEBUG8: replacement") # print(replacement) # print("x") # print(x) obsMap = updateMap(replaced, replacement, obsMap, fitness, observation, predValues, d.extraMapValues) # print("DEBUG9: obsMap.genes") # print(obsMap[0].genes) # OK # exit() # Illuminate with MAP-Elites # print("acqFunc") # print(acqFunction) # print("obsMap") # print(obsMap) # print("p") # print(p) # print("d") # print(d) 
acqMap, percImp, h = mapElites(acqFunction, obsMap, p, d) # print("DEBUG10: acqMap") # print(acqMap) # print("DEBUG11: percImp") # print(percImp) # print("h") # print(h) # exit() # Workaround for acqMap if (isinstance(acqMap,tuple)): if (isinstance(acqMap[0], tuple)): acqMap = acqMap[0][0] else: acqMap = acqMap[0] # viewMap(acqMap,d) percImproved[nSamples] = percImp # ok # print("percImproved") # print(percImproved) percImproved.to_csv('percImproved.csv') # Data Gathering (illum Time) tEnd = time.time() illumTime.append(tEnd - tstart) # time calc # print("acqMap") # pprint(vars(acqMap)) acqMapRecord.at[0,nSamples] = acqMap # print("acqMap.confidence") # print(acqMap.confidence) # print("fitness_flattened") # print(fitness_flattened) # print("acqMap.fitness") # print(acqMap.fitness) fitness_flattened = acqMap.fitness.flatten('F') # DEBUG # for i in zip(acqMap.confidence, fitness_flattened): # print(i) abs_fitness = [abs(val) for val in fitness_flattened] # print((acqMap.confidence * d.varCoef) / abs_fitness) confContribution.at[0,nSamples] = np.nanmedian( (acqMap.confidence * d.varCoef) / abs_fitness) # print("nanmedian") # works # print(np.nanmedian( (acqMap.confidence * d.varCoef) / abs_fitness)) # print("confContribution") # print(confContribution) # 3. Select infill Samples # The next samples to be tested are chosen from the acquisition map: a # sobol sequence is used to evenly sample the map in the feature # dimensions. When evaluated solutions don't converge or the chosen bin # is empty the next bin in the sobol set is chosen. 
print('PE: ' + str(nSamples) + ' | Evaluating New Samples') tstart = time.time() # At first iteration initialize sobol sequence for sample selection if nSamples == p.nInitialSamples: sobSet = i4_sobol_generate(d.nDims,20000,1000).transpose() sobSet = pd.DataFrame(data=sobSet) sobSet = sobSet.sample(frac=1).reset_index(drop=True) sobPoint = 1 # TODO: ADDED: Scaling # sobSet = sobSet.applymap(scale) # for wheelcase: first column (0 - 0.4) second column (0 0.2) sobSet[0] = sobSet[0].apply(scale1) sobSet[1] = sobSet[1].apply(scale2) # Choose new samples and evaluate them for new observations nMissing = p.nAdditionalSamples newValue = pd.DataFrame() newSample = pd.DataFrame() indPool = pd.DataFrame() while nMissing > 0: # Evenly sample solutions from acquisition map newSampleRange = list(range(sobPoint-1, sobPoint + p.nAdditionalSamples-1)) # print("DEBUG12: newSampleRange") # print(newSampleRange) x, binIndx = sobol2indx(sobSet, newSampleRange, d, acqMap.edges) # print("DEBUG13: binIndxAfter") # print(binIndx) # print("DEBUG14: acqMap.genes") # print(acqMap.genes) for iGenes in range(0,binIndx.shape[0]): for gen in range(len(acqMap.genes)): indPool.at[iGenes,gen] = acqMap.genes[gen].iloc[binIndx.iloc[iGenes,0],binIndx.iloc[iGenes,1]] # indPool.at[iGenes,0] = acqMap.genes[0].iloc[binIndx.iloc[iGenes,0],binIndx.iloc[iGenes,1]] # indPool.at[iGenes,1] = acqMap.genes[1].iloc[binIndx.iloc[iGenes,0],binIndx.iloc[iGenes,1]] # print("DEBUG15: indPool") # print(indPool) # print("DEBUG16: observation") # print(observation) # for iGenes in range(0,binIndx.shape[0]): # indPool[iGenes,:] = acqMap.genes[binIndx.iloc[iGenes,0], binIndx.iloc[iGenes,1], :] # Remove repeats and nans (empty bins) # repeats in case of rastrigin: almost impossible? 
ds1 = set([tuple(line) for line in indPool.values]) ds2 = set([tuple(line) for line in observation.values]) indPool = pd.DataFrame(data=list(ds1.difference(ds2))) indPool.dropna(inplace=True) # ok indPool.reset_index(drop=True, inplace=True) # print("DEBUG17: indPool after") # print(indPool) # indPool = np.setdiff1d(indPool,observation) # 'rows','stable' ? # indPool = indPool[:] # ~any(isnan(indPool),2) # Evaluate enough of these valid solutions to get your initial sample set peFunction = lambda x: feval(d.preciseEvaluate, x, d) # returns nan if not converged # print("indPool") # print(indPool) # print("DEBUG18: peFunction") # print(peFunction) # print("nMissing") # print(nMissing) foundSample, foundValue, nMissing, x = getValidInds(indPool, peFunction, nMissing) # print("DEBUG19: foundSample") # print(foundSample) # print("newSample") # print(newSample) # print("foundSample") # print(foundSample) # newSample = [[newSample], [foundSample]] newSample = newSample.append(foundSample, ignore_index=True) # print("newSample") # print(newSample) # print("newValue") # print(newValue) newValue = newValue.append(foundValue, ignore_index=True) # newValue = [[newValue], [foundValue]] # print("newValue") # print(newValue) # Advance sobol sequence sobPoint = sobPoint + p.nAdditionalSamples + 1 # Assign found values value = value.append(newValue, ignore_index=True) # value = [value, newValue] # cat # print("value") # print(value) observation = observation.append(newSample, ignore_index=True) # print("observation335") # print(observation) # observation = [observation, newSample] # cat nSamples = np.shape(observation)[0] if len(observation) != len(np.unique(observation, axis=0)): print('WARNING: duplicate samples in observation set.') tEnd = time.time() peTime.append(tEnd - tstart) # End Acquisition loop class Output: def __init__(self, p, d, model, trainTime, illum, petime, percImproved, predMap, acqMap, confContrib, unpack): self.p = p self.d = d self.model = model self.trainTime 
= trainTime self.illum = illum self.petime = petime self.percImproved = percImproved self.predMap = predMap self.acqMap = acqMap self.confContrib = confContrib self.unpack = unpack # Save relevant Data output = Output(p, d, gpModel, trainingTime, illumTime, peTime, percImproved, predMap, acqMapRecord, confContribution, '') # pprint(vars(output)) # viewMap(output.acqMap.at[0,190],d) # output.p = p
selectedPlugin = models.Plugin.objects.get( name=pluginName, selected=True, active=True ) pluginDict = { "id": selectedPlugin.id, "name": selectedPlugin.name, "version": selectedPlugin.version, "userInput": userInput, "features": [], } except models.Plugin.DoesNotExist: pluginDict = { "id": 9999, "name": pluginName, "version": "1.0", "userInput": userInput, "features": [], } return pluginDict def get_tvc_plugin_dict(configuration): """tvc: variantCaller""" userInput = {"meta": {"configuration": configuration}} return _get_plugin_dict("variantCaller", userInput) def get_mca_plugin_dict(configuration): """mca: molecularCoverageAnalysis""" userInput = {"meta": {"configuration": configuration}} return _get_plugin_dict("molecularCoverageAnalysis", userInput) def simple_compare_dict(dict1, dict2): """ accepts multi-level dictionaries compares values as strings, will not report type mismatch """ if sorted(dict1.keys()) != sorted(dict2.keys()): return False for key, value in list(dict1.items()): if isinstance(value, dict): if not simple_compare_dict(value, dict2[key]): return False elif isinstance(value, list): if sorted(value) != sorted(dict2[key]): return False elif str(value) != str(dict2[key]): return False return True @cache_template_params def add_or_update_sys_template(templateParams, isSystemDefault=False): # when debug is on, do nothing if _OFFCYCLE_DEBUG: return None, False, False sysTemplate = None isCreated = False isUpdated = False planDisplayedName = templateParams.templateName if not planDisplayedName: return sysTemplate, isCreated, isUpdated planName = planDisplayedName.replace(" ", "_") currentTime = datetime.datetime.now() applicationGroup_objs = models.ApplicationGroup.objects.filter( name__iexact=templateParams.applicationGroup ) applicationGroup_obj = None if applicationGroup_objs: applicationGroup_obj = applicationGroup_objs[0] sampleGrouping_obj = None if templateParams.sampleGrouping: sampleGrouping_objs = models.SampleGroupType_CV.objects.filter( 
displayedName__iexact=templateParams.sampleGrouping ) if sampleGrouping_objs: sampleGrouping_obj = sampleGrouping_objs[0] sysTemplate, isCreated = models.PlannedExperiment.objects.get_or_create( isSystemDefault=isSystemDefault, isSystem=True, isReusable=True, isPlanGroup=False, planDisplayedName=planDisplayedName, planName=planName, defaults={ "planStatus": templateParams.planStatus, "runMode": "single", "isReverseRun": False, "planExecuted": False, "runType": templateParams.runType, "usePreBeadfind": templateParams.usePreBeadfind, "usePostBeadfind": templateParams.usePostBeadfind, "preAnalysis": True, "planPGM": "", "templatingKitName": templateParams.templatingKitName, "controlSequencekitname": templateParams.controlSequencekitname, "samplePrepKitName": templateParams.samplePrepKitName, "metaData": "", "date": currentTime, "applicationGroup": applicationGroup_obj, "sampleGrouping": sampleGrouping_obj, "categories": templateParams.categories, "libraryReadLength": templateParams.libraryReadLength, "samplePrepProtocol": templateParams.samplePrepProtocol, "irworkflow": templateParams.irworkflow, }, ) if isCreated: print( "...Created System template.id=%d; name=%s; isSystemDefault=%s" % (sysTemplate.id, sysTemplate.planDisplayedName, str(isSystemDefault)) ) else: hasChanges = False if sysTemplate.libraryReadLength != templateParams.libraryReadLength: print( ">>> DIFF: orig sysTemplate.libraryReadLength=%s for system template.id=%d; name=%s" % (sysTemplate.libraryReadLength, sysTemplate.id, sysTemplate.planName) ) sysTemplate.libraryReadLength = templateParams.libraryReadLength hasChanges = True if sysTemplate.planStatus not in ["planned", "inactive"]: print( ">>> DIFF: orig sysTemplate.planStatus=%s not supported for system template.id=%d; name=%s" % (sysTemplate.planStatus, sysTemplate.id, sysTemplate.planName) ) sysTemplate.planStatus = "planned" hasChanges = True else: if sysTemplate.planStatus != templateParams.planStatus: print( ">>> DIFF: orig 
sysTemplate.planStatus=%s for system template.id=%d; name=%s" % (sysTemplate.planStatus, sysTemplate.id, sysTemplate.planName) ) sysTemplate.planStatus = templateParams.planStatus hasChanges = True if sysTemplate.planExecuted: print( ">>> DIFF: orig sysTemplate.planExecuted=%s for system template.id=%d; name=%s" % (sysTemplate.planExecuted, sysTemplate.id, sysTemplate.planName) ) sysTemplate.planExecuted = False hasChanges = True if sysTemplate.runType != templateParams.runType: print( ">>> DIFF: orig sysTemplate.runType=%s for system template.id=%d; name=%s" % (sysTemplate.runType, sysTemplate.id, sysTemplate.planName) ) sysTemplate.runType = templateParams.runType hasChanges = True if sysTemplate.templatingKitName != templateParams.templatingKitName: print( ">>> DIFF: orig sysTemplate.templatingKitName=%s for system template.id=%d; name=%s" % (sysTemplate.templatingKitName, sysTemplate.id, sysTemplate.planName) ) sysTemplate.templatingKitName = templateParams.templatingKitName hasChanges = True if sysTemplate.controlSequencekitname != templateParams.controlSequencekitname: print( ">>> DIFF: orig sysTemplate.controlSequencekitname=%s for system template.id=%d; name=%s" % ( sysTemplate.controlSequencekitname, sysTemplate.id, sysTemplate.planName, ) ) sysTemplate.controlSequencekitname = templateParams.controlSequencekitname hasChanges = True if sysTemplate.samplePrepKitName != templateParams.samplePrepKitName: print( ">>> DIFF: orig sysTemplate.samplePrepKitName=%s for system template.id=%d; name=%s" % (sysTemplate.samplePrepKitName, sysTemplate.id, sysTemplate.planName) ) sysTemplate.samplePrepKitName = templateParams.samplePrepKitName hasChanges = True if sysTemplate.applicationGroup != applicationGroup_obj: print( ">>> DIFF: orig sysTemplate.applicationGroup=%s for system template.id=%d; name=%s" % (sysTemplate.applicationGroup, sysTemplate.id, sysTemplate.planName) ) sysTemplate.applicationGroup = applicationGroup_obj hasChanges = True if 
def add_or_update_default_system_templates():
    """Create or refresh the per-instrument system default templates."""
    # (template name, instrument type) for each system default template.
    defaults = [
        ("Proton System Default Template", PROTON),
        ("System Default Template", PGM),
        ("S5 System Default Template", S5),
    ]
    for templateName, instrumentType in defaults:
        params = TemplateParams(templateName, instrumentType)
        template, created, _updated = add_or_update_sys_template(
            params, isSystemDefault=True
        )
        finish_sys_template(template, created, params)
def add_or_update_ampliseq_rna_system_templates():
    """Create or refresh the AmpliSeq RNA system template (#10)."""
    params = TemplateParams("Ion AmpliSeq RNA Panel", PGM, "AMPS_RNA")
    params.update(
        {
            "applicationGroup": "RNA",
            "chipType": "318",
            "flows": 500,
            "libraryKitName": "Ion AmpliSeq RNA Library Kit",
            "barcodeKitName": "IonXpress",
            "reference": "hg19_rna",
        }
    )
    template, created, _updated = add_or_update_sys_template(params)

    # Pre-select the coverage plugin plus the third-party Partek uploader.
    plugins = {
        name: _get_plugin_dict(name)
        for name in ("coverageAnalysis", "PartekFlowUploader")
    }
    finish_sys_template(template, created, params, plugins)
def add_or_update_museek_system_templates():
    """Create or refresh the MuSeek system templates (#15, #16, #52)."""
    # (template name, kit/chip parameter overrides) for each MuSeek template.
    museek_templates = [
        (
            "MuSeek Barcoded Library",
            {
                "chipType": "318",
                "flows": 500,
                "threePrimeAdapter": DEFAULT_MUSEEK_3_PRIME_ADAPTER_SEQUENCE,
                "libraryKitName": "MuSeek(tm) Library Preparation Kit",
                "barcodeKitName": "MuSeek Barcode set 1",
                "reference": "hg19",
            },
        ),
        (
            "MuSeek Library",
            {
                "chipType": "318",
                "flows": 500,
                "threePrimeAdapter": DEFAULT_MUSEEK_3_PRIME_ADAPTER_SEQUENCE,
                "libraryKitName": "MuSeek(tm) Library Preparation Kit",
                "barcodeKitName": "MuSeek_5prime_tag",
                "reference": "hg19",
            },
        ),
        (
            "Ion Xpress MuSeek Library",
            {
                "threePrimeAdapter": "TGCACTGAAGCACACAATCACCGACTGCCC",
                "libraryKitName": "Ion Xpress MuSeek Library Preparation Kit",
                "barcodeKitName": "Ion Xpress MuSeek Barcode set 1",
            },
        ),
    ]
    for templateName, overrides in museek_templates:
        params = TemplateParams(templateName, PGM, "GENS")
        params.update(overrides)
        template, created, _updated = add_or_update_sys_template(params)
        finish_sys_template(template, created, params)
# ******************
# MODULE DOCSTRING
# ******************

"""

LOMAP: Graph generation
=====

Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.

"""

# *****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016  UC Irvine and the Authors
#
# Authors: Dr <NAME> and Dr <NAME>
#
# This part of the code was originally written by <NAME>
# and <NAME> at Summa Lab, Dept. of Computer Science,
# University of New Orleans, and it has been adapted to the new Lomap code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, see http://www.gnu.org/licenses/ # ***************************************************************************** # **************** # MODULE IMPORTS # **************** import networkx as nx import numpy as np import subprocess import matplotlib.pyplot as plt import copy from operator import itemgetter from rdkit.Chem import Draw from rdkit.Chem import AllChem import os.path import logging import tempfile import shutil import traceback __all__ = ['GraphGen'] # ************************* # Graph Class # ************************* class GraphGen(object): """ This class is used to set and generate the graph used to plan binding free energy calculation """ def __init__(self, dbase): """ Inizialization function Parameters ---------- dbase : dbase object the molecule container """ self.dbase = dbase self.maxPathLength = dbase.options.max self.maxDistFromActive = dbase.options.max_dist_from_actives self.similarityScoresLimit = dbase.options.cutoff self.requireCycleCovering = not dbase.options.allow_tree if dbase.options.radial: self.lead_index = self.pick_lead() else: self.lead_index = None # A set of nodes that will be used to save nodes that are not a cycle cover for a given subgraph self.nonCycleNodesSet = set() # A set of edges that will be used to save edges that are acyclic for given subgraph self.nonCycleEdgesSet = set() # A count of the number of nodes that are not within self.maxDistFromActive edges # of an active self.distanceToActiveFailures = 0 # Draw Parameters # THIS PART MUST BE CHANGED # Max number of displayed chemical compound images as graph nodes self.max_images = 2000 # Max number of displayed nodes in the graph self.max_nodes = 100 # The maximum threshold distance in angstroms unit used to select if a molecule is depicted self.max_mol_size = 50.0 self.edge_labels = True # The following Section has been strongly copied/adapted from the original 
implementation # Generate a list related to the disconnected graphs present in the initial graph if dbase.options.fast and dbase.options.radial: # only enable the fast map option if use the radial option self.initialSubgraphList = self.generate_initial_subgraph_list(fast_map=True) else: self.initialSubgraphList = self.generate_initial_subgraph_list() # A list of elements made of [edge, weights] for each subgraph self.subgraphScoresLists = self.generate_subgraph_scores_lists(self.initialSubgraphList) # Eliminates from each subgraph those edges whose weights are less than the hard limit self.remove_edges_below_hard_limit() # Make a new master list of subgraphs now that there may be more disconnected components self.workingSubgraphsList = self.generate_working_subgraphs_list() # Make a new sorted list of [edge, weights] for each subgraph now that there may be new subgraphs self.workingSubgraphScoresLists = self.generate_subgraph_scores_lists(self.workingSubgraphsList) # Remove edges, whose removal does not violate constraints, from the subgraphs, # starting with lowest similarity score first if dbase.options.fast and dbase.options.radial: # if we use the fast and radial option, just need to add the surrounding edges from the initial graph self.resultGraph = self.add_surrounding_edges() # after adding the surround edges, some subgraphs may merge into a larger graph and so need to update the # current subgraphs # self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList) # merge all Subgraphs together for layout # self.resultGraph = self.merge_all_subgraphs() else: # >>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< self.minimize_edges() # >>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< # Collect together disjoint subgraphs of like charge into subgraphs self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList) # Combine separate subgraphs into a single resulting graph self.resultGraph = 
self.merge_all_subgraphs() # Make a copy of the resulting graph for later processing in connectResultingComponents() self.copyResultGraph = self.resultGraph.copy() # Holds list of edges that were added in the connect components phase self.edgesAddedInFirstTreePass = [] # Add edges to the resultingGraph to connect its components self.connect_subgraphs() return def pick_lead(self): if (self.dbase.nums() * (self.dbase.nums() - 1) / 2) != self.dbase.strict_mtx.size: raise ValueError("There are errors in the similarity score matrices") if not self.dbase.options.hub == "None": # hub radial option. Use the provided reference compound as a hub hub_index = None for i in range(0, self.dbase.nums()): if os.path.basename(self.dbase[i].getName()) == self.dbase.options.hub: hub_index = i if hub_index is None: logging.info( "Warning: the specified center ligand %s is not in the ligand database, will not use the radial option." % self.dbase.options.hub) return hub_index else: # complete radial option. Pick the compound with the highest total similarity to all other compounds to use as a hub all_sum_i = [] for i in range(0, self.dbase.nums()): sum_i = 0 for j in range(0, self.dbase.nums()): sum_i += self.dbase.strict_mtx[i, j] all_sum_i.append(sum_i) max_value = max(all_sum_i) max_index = [i for i, x in enumerate(all_sum_i) if x == max_value] max_index_final = max_index[0] return max_index_final def generate_initial_subgraph_list(self, fast_map=False): """ This function generates a starting graph connecting with edges all the compounds with a positive strict similarity score Returns ------- initialSubgraphList : list of NetworkX graph the list of connected component graphs """ compound_graph = nx.Graph() if (self.dbase.nums() * (self.dbase.nums() - 1) / 2) != self.dbase.strict_mtx.size: raise ValueError("There are errors in the similarity score matrices") if not fast_map: # if not fast map option, connect all possible nodes to generate the initial graph for i in range(0, 
self.dbase.nums()): if i == 0: compound_graph.add_node(i, ID=self.dbase[i].getID(), fname_comp=os.path.basename(self.dbase[i].getName()), active=self.dbase[i].isActive()) for j in range(i + 1, self.dbase.nums()): if i == 0: compound_graph.add_node(j, ID=self.dbase[j].getID(), fname_comp=os.path.basename(self.dbase[j].getName()), active=self.dbase[j].isActive()) wgt = self.dbase.strict_mtx[i, j] if wgt > 0.0: compound_graph.add_edge(i, j, similarity=wgt, strict_flag=True) else: # if fast map option, then add all possible radial edges as the initial graph for i in range(0, self.dbase.nums()): # add the node for i compound_graph.add_node(i, ID=self.dbase[i].getID(), fname_comp=os.path.basename(self.dbase[i].getName())) if i != self.lead_index: wgt = self.dbase.strict_mtx[i, self.lead_index] if wgt > 0: compound_graph.add_edge(i, self.lead_index, similarity=wgt, strict_flag=True) initialSubgraphGen = [compound_graph.subgraph(c).copy() for c in nx.connected_components(compound_graph)] initialSubgraphList = [x for x in initialSubgraphGen] return initialSubgraphList def generate_subgraph_scores_lists(self, subgraphList): """ This function generate a list of lists where each inner list is the weights of each edge in a given subgraph in the subgraphList, sorted from lowest to highest Returns ------- subgraphScoresLists : list of lists each list contains a tuple with the graph node indexes and their similatiry as weigth """ subgraphScoresLists = [] for subgraph in subgraphList: weightsDictionary = nx.get_edge_attributes(subgraph, 'similarity') subgraphWeightsList = [(edge[0], edge[1], weightsDictionary[edge]) for edge in weightsDictionary.keys()] subgraphWeightsList.sort(key=lambda entry: entry[2]) subgraphScoresLists.append(subgraphWeightsList) return subgraphScoresLists def remove_edges_below_hard_limit(self): """ This function removes edges below the set hard limit from each subGraph and from each weightsList """ totalEdges = 0 for subgraph in self.initialSubgraphList: 
weightsList = self.subgraphScoresLists[self.initialSubgraphList.index(subgraph)] index = 0 for edge in weightsList: if edge[2] < self.similarityScoresLimit: subgraph.remove_edge(edge[0], edge[1]) index = weightsList.index(edge) del weightsList[:index + 1] totalEdges = totalEdges + subgraph.number_of_edges() def generate_working_subgraphs_list(self): """ After the deletition of the edges that have a weigth less than the selected threshould the subgraph maybe disconnected and a new master list of connected subgraphs is genereted Returns ------- workingSubgraphsList : list of lists each list contains a tuple with the graph node indexes and their similatiry as weigth """ workingSubgraphsList = [] for subgraph in self.initialSubgraphList: newSubgraphList = [subgraph.subgraph(c).copy() for c in nx.connected_components(subgraph)] for newSubgraph in newSubgraphList: workingSubgraphsList.append(newSubgraph) return workingSubgraphsList def minimize_edges(self): """ Minimize edges in each subgraph while ensuring constraints are met """ for
<gh_stars>0 # ----------------------------------------------------------------------------- # Name: FishingLocations.py # Purpose: Support class for FishingLocations # # Author: <NAME> <<EMAIL>> # # Created: July 15, 2016 # License: MIT # ------------------------------------------------------------------------------ from math import isclose import logging from typing import List # Type hints from PyQt5.QtCore import pyqtProperty, QVariant, QObject, pyqtSignal, pyqtSlot from py.observer.ObserverDBUtil import ObserverDBUtil from py.observer.FishingLocationsModel import FishingLocationsModel # View model # Imports for unit testing import unittest from py.observer.ObserverDBModels import * from py.observer.ObserverTabletGPS import TabletGPS from playhouse.apsw_ext import APSWDatabase from playhouse.test_utils import test_database # noinspection PyPep8Naming class ObserverFishingLocations(QObject): modelChanged = pyqtSignal() locationChanged = pyqtSignal() unusedSignal = pyqtSignal() def __init__(self): super().__init__() self._logger = logging.getLogger(__name__) self._locations_model = FishingLocationsModel() self._current_location = None self._current_activity_id = None self._tablet_gps = TabletGPS() @pyqtProperty(QVariant, notify=unusedSignal) def tabletGPS(self): return self._tablet_gps def load_fishing_locations(self, fishing_activity_id): """ Load locations from database, build FramListModel """ self._locations_model.clear() self._current_activity_id = fishing_activity_id locs_q = FishingLocations.select().where(FishingLocations.fishing_activity == fishing_activity_id) if len(locs_q) > 0: for loc in locs_q: # Build FramListModel self._locations_model.add_location(loc) self.modelChanged.emit() @pyqtSlot(int, str, float, float, float) def update_location_by_id(self, loc_id, date, latitude, longitude, depth): try: location_item = FishingLocations.get((FishingLocations.fishing_location == loc_id)) location_item.location_date = date location_item.latitude = latitude 
location_item.longitude = longitude location_item.depth = depth location_item.depth_um = 'FM' location_item.save() # Update location positions in DB and the view model to handle possible shift in position. self._update_location_positions() self._logger.debug('Location update DB id {loc_id} {date} {lat} {long} {depth}'.format( loc_id=loc_id, date=date, lat=latitude, long=longitude, depth=depth)) except FishingLocations.DoesNotExist: self._logger.error('Could not find DB entry for location id {}'.format(loc_id)) @pyqtSlot(int, str, float, float, float, result=int) def add_update_location(self, position, date, latitude, longitude, depth): # depth_um assumed to be "ftm" return self.add_update_location_haul_id(self._current_activity_id, position, date, latitude, longitude, depth) @pyqtSlot(int, int, str, float, float, float, result=int) def add_update_location_haul_id(self, haul_id, position, date, latitude, longitude, depth): # depth_um assumed to be "ftm" try: try: location_item = FishingLocations.get((FishingLocations.fishing_activity == haul_id) & (FishingLocations.position == position)) self._logger.debug( 'Fishing location haul ID={}, position={} found, updating.'.format(haul_id, position)) location_item.location_date = date location_item.latitude = latitude location_item.longitude = longitude location_item.depth = depth location_item.depth_um = 'FM' location_item.position = position location_item.save() # Update the database # Update location positions in DB and the view model to handle possible shift in position. 
self._update_location_positions() except FishingLocations.DoesNotExist: self._logger.debug( 'Create fishing location haul ID={}, position={}'.format(haul_id, position)) user_id = ObserverDBUtil.get_current_user_id() location_item = FishingLocations.create(fishing_activity=haul_id, location_date=date, latitude=latitude, longitude=longitude, depth=depth, depth_um='FM', position=position, created_by=user_id, created_date=ObserverDBUtil.get_arrow_datestr()) self._logger.debug('Fishing location position {} created.'.format(location_item.position)) # New entry added, but position number sequence may be off, depending on datetime of new entry. # Update location positions in DB and the view model to handle possible insertion. self._update_location_positions() except Exception as e: self._logger.error(e) return location_item.fishing_location ## Primary key index of location @pyqtSlot(int) def delete_location_by_position(self, position): try: try: haul_id = self._current_activity_id location_item = FishingLocations.get((FishingLocations.fishing_activity == haul_id) & (FishingLocations.position == position)) self._logger.debug( 'Fishing location haul ID={}, position={} found, deleting.'.format(haul_id, position)) location_item.delete_instance(haul_id, position) # DB # Update location positions in DB and the view model to fill a possible gap. 
self._update_location_positions() except FishingLocations.DoesNotExist: self._logger.error( 'Attempt to delete non-existent fishing location haul ID={}, position={}'.format(haul_id, position)) except Exception as e: self._logger.error(e) def _get_gps_locations(self): # Intended for internal use count = self._locations_model.count locs = [] for i in range(count): locs.append({'pos': self._locations_model.get(i)['position'], 'lat': self._locations_model.get(i)['latitude'], 'long': self._locations_model.get(i)['longitude']}) return locs @pyqtSlot(QVariant, QVariant, QVariant, result=bool, name='verifyNoMatchGPSPosition') def verify_no_match_gps_position(self, position, lat_degs, long_degs): if self._locations_model.count <= 0: return True # Only have 0 or 1 location, can't clash with that. locs = self._get_gps_locations() for l in locs: self._logger.debug(f'Contemplate {position} {lat_degs} {long_degs} vs {l}') if l['pos'] != position and (isclose(l['lat'], lat_degs) or isclose(l['long'], long_degs)): self._logger.warning(f'Found close lat/long match {l}') return False return True # Else, no matches @pyqtProperty(QVariant, notify=modelChanged) def CurrentFishingLocationsModel(self): return self._locations_model @pyqtProperty(QVariant, notify=locationChanged) def currentLocation(self): return self._current_location # TODO current (selected) location # _current_location def _set_cur_prop(self, property, value): """ Helper function - set current haul properties in FramListModel @param property: property name @param value: value to store @return: """ self._locations_model.setProperty(self._internal_haul_idx, property, value) @pyqtSlot(str, result='QVariant') def getData(self, data_name): """ Shortcut to get data from the DB that doesn't deserve its own property (Note, tried to use a dict to simplify this, but DB cursors were not updating) :return: Value found in DB """ if self._current_location is None: logging.warning('Attempt to get data with null current location.') 
return None data_name = data_name.lower() return_val = None if data_name == 'position': return_val = self._current_location.latitude else: logging.warning('Attempt to get unknown data name: {}'.format(data_name)) return '' if return_val is None else return_val @pyqtSlot(str, QVariant) def setData(self, data_name, data_val): """ Set misc data to the DB :return: """ if self._current_location is None: logging.warning('Attempt to set data with null current location.') return data_name = data_name.lower() if data_name == 'latitude': self._current_location.latitude = float(data_val) else: logging.warning('Attempt to set unknown data name: {}'.format(data_name)) return self._current_location.save() self._set_cur_prop(data_name, data_val) logging.debug('Set {} to {}'.format(data_name, data_val)) self.modelChanged.emit() @staticmethod def _resequence_orm_location_positions(locations: List[FishingLocations]) -> List[FishingLocations]: """ Given a list of peewee ORM FishingLocations, return a list sorted by arrow time, assigning position number from -1 to N-2: Conventions: - Earliest location, aka "Set" is assigned POSITION = -1 - Latest location (in N > 1), aka "Up" is assigned POSITION 0 - If additions locations, assign position from 0 to N-2, in ascending datetime order. - In case of exactly same datetime, use FISHING_LOCATION_ID as minor sort key. :param locations: List of Peewee ORM fishing locations with POSITION values possibly out of sequence due to a location being added or deleted. :return: List of fishing locations with POSITION set by datetime order given above. Note: neither SQLite FISHING_LOCATIONS table nor FishingLocationModel's model have been updated. 
""" slocations = sorted(locations, key=lambda loc: " ".join([ ObserverDBUtil.str_to_datetime(loc.location_date).format('MM/DD/YYYY HH:mm'),#'YMMDDHHmm'), str.format("{0:0>5}", loc.fishing_location)])) # Earliest if len(slocations) > 0: slocations[0].position = -1 # Last if len(slocations) > 1: slocations[-1].position = 0 # In-between if len(slocations) > 2: for i in range(1, len(slocations) - 1): slocations[i].position = i return slocations def _update_location_positions(self): """ Acting upon both the OR model and view model of Fishing Locations, update the position number of all locations for this activity ID (haul) so that positions, sorted by datetime, are assigned the values -1, 1, 2, ... 0 (yes, 0 is assigned to the most recent position, the "Up" position. Assumes that at most a few tens of locations are involved, so sort, clear and reloads need not be blindingly fast. :return: None """ if (self._current_activity_id is None): logging.error("_update_location_positions called with null haul (activity) ID.") return logging.debug("Modifying entries for Haul #{} in FishingLocations table in database '{}' ...".format( self._current_activity_id, FishingLocations._meta.database.database)) # TODO: Put these select, delete, and insert operations in a transaction. # (Try#1: "with FishingLocations._meta.database:" jumped to Exception catch with "_exit_") # Get all the OR model locations for this haul locs = FishingLocations.select().where(FishingLocations.fishing_activity == self._current_activity_id) # Assign position number sorted by datetime locs_sorted = ObserverFishingLocations._resequence_orm_location_positions(locs) # Save (update) all the OR records # Position number must be unique within a haul. # To avoid non-unique position numbers on save, first delete all current entries for current haul. 
try: delete_query = FishingLocations.delete().where( FishingLocations.fishing_activity == self._current_activity_id) delete_query.execute() # Delete query should be faster than: # for loc in locs: # loc.delete_instance() except Exception as e: logging.error("_update_location_positions: Delete of outdated locations failed with {}.".format(e)) try: for loc_sorted in locs_sorted: # Force_insert: re-use each location's FISHING_LOCATION_ID primary key value loc_sorted.save(force_insert=True) except Exception as e: logging.error("_update_location_positions: save of updated location failed with {}.".format(e)) # Force a reload of the view model - re-read from OR model. # Side-effect: Signals the ObserverTableView that locations have changed. self.load_fishing_locations(self._current_activity_id) class TestOrmFishingLocationsModel(unittest.TestCase): """ ObserverFishingLocations interacts with the OR model of FishingLocations in ObserverDBModels and with the view model FishingLocationsModels. This class tests interactions with the OR model. Note: any write/update interaction should be done with test_database... http://stackoverflow.com/questions/15982801/custom-sqlite-database-for-unit-tests-for-code-using-peewee-orm """ def setUp(self): # TODO: Either phase out ObserverDB, or make it testable, as in ObserverDB(':memory:') # Tools available now are a Peewee test database context manager using an in-memory APSW database self.test_db = APSWDatabase(':memory:') self.test_tables = ( Vessels, Users, Programs, Trips, FishingActivities, CatchCategories, FishingLocations, ) self.test_vessel_id = 1 self.test_user_id = 1 self.test_program_id = 1 self.test_category_id = 1 self.test_activity_num = 1 # aka Haul # Only one test dataset of locations used in this test class.
class FHIRCodeSystem(object):
    """ Holds on to CodeSystems bundled with the spec.

    Parses a FHIR CodeSystem resource, decides whether an enum should be
    generated for it (``self.generate_enum``) and, when it should, collects
    the concept codes into ``self.codes``.
    """

    def __init__(self, spec: FHIRSpec, resource):
        """ Parse one CodeSystem resource.

        :param spec: the FHIRSpec this code system belongs to
        :param resource: parsed JSON of the CodeSystem resource; must
            contain a "content" entry
        :raises Exception: if no usable enum name can be derived
        """
        assert "content" in resource
        self.spec = spec
        # Raw resource dict, kept for later reference.
        self.definition = resource
        self.url = resource.get("url")
        # Prefer an explicitly configured enum name for this URL; otherwise
        # derive a safe name from the resource's "name".
        if self.url in self.spec.generator_config.mapping_rules.enum_namemap:
            self.name = self.spec.generator_config.mapping_rules.enum_namemap[self.url]
        else:
            self.name = self.spec.safe_enum_name(resource.get("name"), ucfirst=True)
        if len(self.name) < 1:
            raise Exception(
                f"Unable to create a name for enum of system {self.url}. You may need to specify a name explicitly in mappings.enum_namemap. Code system content: {resource}"
            )
        self.description = resource.get("description")
        self.valueset_url = resource.get("valueSet")
        self.codes = None
        self.generate_enum = False
        concepts = resource.get("concept", [])

        # Experimental code systems never get an enum.
        if resource.get("experimental"):
            return

        # Only code systems whose content is "complete" are eligible.
        if resource["content"] == "complete":
            self.generate_enum = True
        if not self.generate_enum:
            logger.warning(
                f"Will not generate enum for CodeSystem '{self.url}' whose content is {resource['content']}"
            )
            return

        assert concepts, 'Expecting at least one code for "complete" CodeSystem'

        # Very large code systems are skipped to keep generated code small.
        if len(concepts) > 200:
            self.generate_enum = False
            logger.info(
                f"Will not generate enum for CodeSystem '{self.url}' because it has > 200 ({len(concepts)}) concepts"
            )
            return

        self.codes = self.parsed_codes(concepts)

    def parsed_codes(self, codes, prefix=None):
        """ Recursively collect concept dicts, attaching a safe "name" and a
        "definition" to each.

        Returns None (after clearing ``self.generate_enum``) if any concept
        code cannot become a valid enum member (i.e. it starts with a digit).

        :param codes: list of concept dicts (each with a "code" entry)
        :param prefix: unused here; kept for call compatibility
        :raises Exception: if a safe member name cannot be created for a code
        """
        found = []
        for c in codes:
            # Enum member names cannot start with a digit; give up entirely.
            if c["code"][:1].isdigit():
                self.generate_enum = False
                logger.info(
                    f"Will not generate enum for CodeSystem '{self.url}' because at least one concept code starts with a number"
                )
                return None
            cd = c["code"]
            code_name = self.spec.safe_enum_name(cd)
            if len(code_name) < 1:
                raise Exception(
                    f"Unable to create a member name for enum '{cd}' in {self.url}. You may need to add '{cd}' to mappings.enum_map"
                )
            c["name"] = code_name
            # Fall back to the member name when no textual definition exists.
            c["definition"] = c.get("definition") or c["name"]
            found.append(c)

            # Flatten nested concepts into the same result list.
            if "concept" in c:
                fnd = self.parsed_codes(c["concept"])
                if fnd is None:
                    return None
                found.extend(fnd)
        return found
""" def __init__(self, spec, profile): self.manual_module = None self.spec = spec self.url = None self.targetname = None self.structure = None self.elements = None self.main_element = None self._class_map = {} self.classes: List[fhirclass.FHIRClass] = [] self._did_finalize = False if profile is not None: self.parse_profile(profile) def __repr__(self): return f"<{self.__class__.__name__}> name: {self.name}, url: {self.url}" @property def name(self): return self.structure.name if self.structure is not None else None def read_profile(self, filepath): """ Read the JSON definition of a profile from disk and parse. Not currently used. """ profile = None with io.open(filepath, "r", encoding="utf-8") as handle: profile = json.load(handle) self.parse_profile(profile) def parse_profile(self, profile): """ Parse a JSON profile into a structure. """ assert profile assert "StructureDefinition" == profile["resourceType"] # parse structure self.url = profile.get("url") logger.info('Parsing profile "{}"'.format(profile.get("name"))) self.structure = FHIRStructureDefinitionStructure(self, profile) def process_profile(self): """ Extract all elements and create classes. 
""" struct = self.structure.differential # or self.structure.snapshot if struct is not None: mapped = {} self.elements = [] for elem_dict in struct: element = FHIRStructureDefinitionElement( self, elem_dict, self.main_element is None ) self.elements.append(element) mapped[element.path] = element # establish hierarchy (may move to extra loop in case elements are no longer in order) if element.is_main_profile_element: self.main_element = element parent = mapped.get(element.parent_name) if parent: parent.add_child(element) # resolve element dependencies for element in self.elements: element.resolve_dependencies() # run check: if n_min > 0 and parent is in summary, must also be in summary for element in self.elements: if element.n_min is not None and element.n_min > 0: if ( element.parent is not None and element.parent.is_summary and not element.is_summary ): logger.error( "n_min > 0 but not summary: `{}`".format(element.path) ) element.summary_n_min_conflict = True # create classes and class properties if self.main_element is not None: snap_class, subs = self.main_element.create_class() if snap_class is None: raise Exception( 'The main element for "{}" did not create a class'.format(self.url) ) self.found_class(snap_class) for sub in subs: self.found_class(sub) self.targetname = snap_class.name def element_with_id(self, ident): """ Returns a FHIRStructureDefinitionElementDefinition with the given id, if found. Used to retrieve elements defined via `contentReference`. """ if self.elements is not None: for element in self.elements: if element.definition.id == ident: return element return None def dstu2_element_with_name(self, name): """ Returns a FHIRStructureDefinitionElementDefinition with the given name, if found. Used to retrieve elements defined via `nameReference` used in DSTU-2. 
""" if self.elements is not None: for element in self.elements: if element.definition.name == name: return element return None # MARK: Class Handling def found_class(self, klass): self.classes.append(klass) def needed_external_classes(self): """ Returns a unique list of class items that are needed for any of the receiver's classes' properties and are not defined in this profile. :raises: Will raise if called before `finalize` has been called. """ if not self._did_finalize: raise Exception("Cannot use `needed_external_classes` before finalizing") internal = set([c.name for c in self.classes]) needed = set() needs = [] for klass in self.classes: # are there superclasses that we need to import? sup_cls = klass.superclass if ( sup_cls is not None and sup_cls.name not in internal and sup_cls.name not in needed ): needed.add(sup_cls.name) needs.append(sup_cls) # look at all properties' classes and assign their modules for prop in klass.properties: prop_cls_name = prop.class_name if prop.enum is not None: enum_cls, did_create = fhirclass.FHIRClass.for_element(prop.enum) enum_cls.module = prop.enum.name prop.module_name = enum_cls.module if enum_cls.name not in needed: needed.add(enum_cls.name) needs.append(enum_cls) elif ( prop_cls_name not in internal and not self.spec.class_name_is_native(prop_cls_name) ): prop_cls = fhirclass.FHIRClass.with_name(prop_cls_name) if prop_cls is None: raise Exception( 'There is no class "{}" for property "{}" on "{}" in {}'.format( prop_cls_name, prop.name, klass.name, self.name ) ) else: prop.module_name = prop_cls.module if prop_cls_name not in needed: needed.add(prop_cls_name) needs.append(prop_cls) return sorted(needs, key=lambda n: n.module or n.name) def referenced_classes(self): """ Returns a unique list of **external** class names that are referenced from at least one of the receiver's `Reference`-type properties. :raises: Will raise if called before `finalize` has been called. 
""" if not self._did_finalize: raise Exception("Cannot use `referenced_classes` before finalizing") references = set() for klass in self.classes: for prop in klass.properties: if len(prop.reference_to_names) > 0: references.update(prop.reference_to_names) # no need to list references to our own classes, remove them for klass in self.classes: references.discard(klass.name) return sorted(references) def writable_classes(self): return [klass for klass in self.classes if klass.should_write()] # MARK: Finalizing def finalize(self): """ Our spec object calls this when all profiles have been parsed. """ # assign all super-classes as objects for cls in self.classes: if cls.superclass is None: super_cls = fhirclass.FHIRClass.with_name(cls.superclass_name) if super_cls is None and cls.superclass_name is not None: raise Exception( 'There is no class implementation for class named "{}" in profile "{}"'.format( cls.superclass_name, self.url ) ) else: cls.superclass = super_cls self._did_finalize = True class FHIRStructureDefinitionStructure(object): """ The actual structure of a complete profile. """ def __init__(self, profile, profile_dict): self.profile = profile self.name = None self.base = None self.kind = None self.subclass_of = None self.snapshot = None self.differential = None self.parse_from(profile_dict) def parse_from(self, json_dict): name = json_dict.get("name") if not name: raise Exception("Must find 'name' in profile dictionary but found nothing") self.name = self.profile.spec.class_name_for_profile(name) self.base = json_dict.get("baseDefinition") self.kind = json_dict.get("kind") if self.base: self.subclass_of = self.profile.spec.class_name_for_profile(self.base) # find element definitions if "snapshot" in json_dict: self.snapshot = json_dict["snapshot"].get("element", []) if "differential" in json_dict: self.differential = json_dict["differential"].get("element", []) class FHIRStructureDefinitionElement(object): """ An element in a profile's structure. 
""" def __init__(self, profile, element_dict, is_main_profile_element=False): assert isinstance(profile, FHIRStructureDefinition) self.profile = profile self.path = None self.parent = None self.children = None self.parent_name = None self.definition = None self.n_min = None self.n_max = None self.is_summary = False # to mark conflicts, see #13215 (http://gforge.hl7.org/gf/project/fhir/tracker/?action=TrackerItemEdit&tracker_item_id=13125) self.summary_n_min_conflict = False self.valueset = None self.enum = None # assigned if the element has a binding to a ValueSet that is a CodeSystem generating an enum self.is_main_profile_element = is_main_profile_element self.represents_class = False self._superclass_name = None self._name_if_class = None self._did_resolve_dependencies = False if element_dict is not None: self.parse_from(element_dict) else: self.definition = FHIRStructureDefinitionElementDefinition(self, None) def parse_from(self, element_dict): self.path = element_dict["path"] parts = self.path.split(".") self.parent_name = ".".join(parts[:-1]) if len(parts) > 0 else None prop_name = parts[-1] if "-"
<gh_stars>10-100 #!/usr/bin/python3 ''' NAME: ap_ctl.py PURPOSE: Script that logs into an AP via Serial, SSH, or Telnet to read data or execute commands EXAMPLE: ./ap_ctl.py --scheme "serial" "--tty "Serial port for accessing AP" --prompt "#" --dest <ip if using SSH or Telnet> --port <port , none for serial> --user <user name> --passwd <password> --action <command> In a program: ap_info= subprocess.run(["./ap_ctl.py", "--scheme", ap_dict['ap_scheme'], "--prompt", ap_dict['ap_prompt'],"--dest", ap_dict['ap_ip'], "--port", ap_dict["ap_port"], "--user", ap_dict['ap_user'], "--passwd", ap_dict['ap_pw'],"--action", "powercfg"],stdout=subprocess.PIPE) NOTES: LANforge 192.168.100.178 Controller at 192.168.100.112 admin/Cisco123 Controller is 172.16.31.10 AP is on serial port /dev/ttyUSB1 or /dev/ttyUSB2 9600 8 n 1 make sure pexpect is installed: $ sudo yum install python3-pexpect You might need to install pexpect-serial using pip: $ pip3 install pexpect-serial $ sudo pip3 install pexpect-serial ./ap_ctl.py ''' import sys if sys.version_info[0] != 3: print("This script requires Python 3") exit() import logging import time from time import sleep import argparse import pexpect import serial from pexpect_serial import SerialSpawn # pip install pexpect-serial (on Ubuntu) # sudo pip install pexpect-serial (on Ubuntu for everyone) default_host = "localhost" default_ports = { "serial": None, "ssh": 22, "telnet": 23 } NL = "\n" CR = "\r\n" Q = '"' A = "'" FORMAT = '%(asctime)s %(name)s %(levelname)s: %(message)s' band = "a" logfile = "stdout" # regex101.com , # this will be in the tx_power script # ^\s+1\s+6\s+\S+\s+\S+\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+) def usage(): print("$0 used connect to Cisco AP:") print("-a|--ap: AP to act upon") print("-d|--dest: destination host") print("-o|--port: destination port") print("-u|--user: AP login name") print("-p|--pass: AP password") print("-s|--scheme (serial|telnet|ssh): connect 
to controller via serial, ssh or telnet") print("--tty Serial port for accessing AP") print("-l|--log file: log messages here") print("-b|--baud: serial baud rate") print("-z|--action: action") print("-h|--help") # see https://stackoverflow.com/a/13306095/11014343 class FileAdapter(object): def __init__(self, logger): self.logger = logger def write(self, data): # NOTE: data can be a partial line, multiple lines data = data.strip() # ignore leading/trailing whitespace if data: # non-blank self.logger.info(data) def flush(self): pass # leave it to logging to flush properly # Test command if lanforge connected ttyUSB0 # sudo ./ap_ctl.py -a lanforge -d 0 -o 0 -u "lanforge" -p "lanforge" -s "serial" -t "/dev/ttyUSB0" # sample for lanforge 192.168.100.178 # sudo ./ap_ctl.py -a APA53.0E7B.EF9C -d 0 -o 0 -u "admin" -p "Admin123" -s "serial" -t "/dev/ttyUSB2" -z "show_log" def main(): global logfile AP_ESCAPE = "Escape character is '^]'." AP_USERNAME = "Username:" AP_PASSWORD = "Password:" AP_EN = "en" AP_MORE = "--More--" AP_EXIT = "exit" LF_PROMPT = "$" CR = "\r\n" parser = argparse.ArgumentParser(description="Cisco AP Control Script") parser.add_argument("-a", "--prompt", type=str, help="ap prompt") parser.add_argument("-d", "--dest", type=str, help="address of the AP 172.19.27.55") parser.add_argument("-o", "--port", type=int, help="control port on the AP, 2008") parser.add_argument("-u", "--user", type=str, help="credential login/username, admin") parser.add_argument("-p", "--passwd", type=str, help="credential password <PASSWORD>") parser.add_argument("-s", "--scheme", type=str, choices=["serial", "ssh", "telnet"], help="Connect via serial, ssh or telnet") parser.add_argument("-t", "--tty", type=str, help="tty serial device for connecting to AP") parser.add_argument("-l", "--log", type=str, help="logfile for messages, stdout means output to console",default="stdout") parser.add_argument("-z", "--action", type=str, help="action, current action is powercfg") 
parser.add_argument("-b", "--baud", type=str, help="action, baud rate lanforge: 115200 cisco: 9600") args = None try: args = parser.parse_args() host = args.dest scheme = args.scheme port = (default_ports[scheme], args.port)[args.port != None] user = args.user if (args.log != None): logfile = args.log except Exception as e: logging.exception(e) usage() exit(2) console_handler = logging.StreamHandler() formatter = logging.Formatter(FORMAT) logg = logging.getLogger(__name__) logg.setLevel(logging.DEBUG) file_handler = None if (logfile is not None): if (logfile != "stdout"): file_handler = logging.FileHandler(logfile, "w") file_handler.setLevel(logging.DEBUG) file_handler.setFormatter(formatter) logg.addHandler(file_handler) logging.basicConfig(format=FORMAT, handlers=[file_handler]) else: # stdout logging logging.basicConfig(format=FORMAT, handlers=[console_handler]) egg = None # think "eggpect" ser = None try: if (scheme == "serial"): #eggspect = pexpect.fdpexpect.fdspan(telcon, logfile=sys.stdout.buffer) ser = serial.Serial(args.tty, int(args.baud), timeout=5) print("Created serial connection on %s, open: %s"%(args.tty, ser.is_open)) egg = SerialSpawn(ser) egg.logfile = FileAdapter(logg) time.sleep(1) egg.sendline(CR) time.sleep(1) elif (scheme == "ssh"): if (port is None): port = 22 cmd = "ssh -p%d %s@%s"%(port, user, host) logg.info("Spawn: "+cmd+NL) egg = pexpect.spawn(cmd) #egg.logfile_read = sys.stdout.buffer egg.logfile = FileAdapter(logg) elif (scheme == "telnet"): if (port is None): port = 23 cmd = "telnet {} {}".format(host, port) logg.info("Spawn: "+cmd+NL) egg = pexpect.spawn(cmd) egg.logfile = FileAdapter(logg) # Will login below as needed. 
else: usage() exit(1) except Exception as e: logging.exception(e) AP_PROMPT = "{}>".format(args.prompt) AP_HASH = "{}#".format(args.prompt) time.sleep(0.1) logged_in = False loop_count = 0 while (loop_count <= 8 and logged_in == False): loop_count += 1 i = egg.expect_exact([AP_ESCAPE,AP_PROMPT,AP_HASH,AP_USERNAME,AP_PASSWORD,AP_MORE,LF_PROMPT,pexpect.TIMEOUT],timeout=5) if i == 0: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_ESCAPE,i,egg.before,egg.after)) egg.sendline(CR) # Needed after Escape or should just do timeout and then a CR? sleep(1) if i == 1: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_PROMPT,i,egg.before,egg.after)) egg.sendline(AP_EN) sleep(1) j = egg.expect_exact([AP_PASSWORD,pexpect.TIMEOUT],timeout=5) if j == 0: logg.info("Expect: {} i: {} j: {} before: {} after: {}".format(AP_PASSWORD,i,j,egg.before,egg.after)) egg.sendline(args.passwd) sleep(1) k = egg.expect_exact([AP_HASH,pexpect.TIMEOUT],timeout=5) if k == 0: logg.info("Expect: {} i: {} j: {} k: {} before: {} after: {}".format(AP_PASSWORD,i,j,k,egg.before,egg.after)) logged_in = True if k == 1: logg.info("Expect: {} i: {} j: {} k: {} before: {} after: {}".format("Timeout",i,j,k,egg.before,egg.after)) if j == 1: logg.info("Expect: {} i: {} j: {} before: {} after: {}".format("Timeout",i,j,egg.before,egg.after)) if i == 2: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_HASH,i,egg.before,egg.after)) logged_in = True sleep(1) if i == 3: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_USERNAME,i,egg.before,egg.after)) egg.sendline(args.user) sleep(1) if i == 4: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_PASSWORD,i,egg.before,egg.after)) egg.sendline(args.passwd) sleep(1) if i == 5: logg.info("Expect: {} i: {} before: {} after: {}".format(AP_MORE,i,egg.before,egg.after)) if (scheme == "serial"): egg.sendline("r") else: egg.sendcontrol('c') sleep(1) # for Testing serial connection using Lanforge if i == 6: logg.info("Expect: {} 
i: {} before: {} after: {}".format(LF_PROMPT,i,egg.before.decode('utf-8', 'ignore'),egg.after.decode('utf-8', 'ignore'))) if (loop_count < 3): egg.send("ls -lrt") sleep(1) if (loop_count > 4): logged_in = True # basically a test mode using lanforge serial if i == 7: logg.info("Expect: {} i: {} before: {} after: {}".format("Timeout",i,egg.before,egg.after)) egg.sendline(CR) sleep(1) if (args.action == "powercfg"): logg.info("execute: show controllers dot11Radio 1 powercfg | g T1") egg.sendline('show controllers dot11Radio 1 powercfg | g T1') egg.expect([pexpect.TIMEOUT], timeout=3) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=5) if i == 0: egg.sendcontrol('c') if i == 1: logg.info("send cntl c anyway") egg.sendcontrol('c') elif (args.action == "clear_log"): logg.info("execute: clear log") egg.sendline('clear log') sleep(0.4) egg.sendline('show log') egg.expect([pexpect.TIMEOUT], timeout=2) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output # allow for normal logout below elif (args.action == "show_log"): logg.info("execute: show log") egg.sendline('show log') sleep(0.4) egg.expect([pexpect.TIMEOUT], timeout=2) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=4) if i == 0: egg.sendline('r') egg.expect([pexpect.TIMEOUT], timeout=4) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output if i == 1: print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for 
subprocess to see output # allow for normal logout below # show log | g DOT11_DRV # CAC_EXPIRY_EVT: CAC finished on DFS channel 52 elif (args.action == "cac_expiry_evt"): logg.info("execute: show log | g CAC_EXPIRY_EVT") egg.sendline('show log | g CAC_EXPIRY_EVT') sleep(0.4) egg.expect([pexpect.TIMEOUT], timeout=2) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=4) if i == 0: egg.sendline('r') egg.expect([pexpect.TIMEOUT], timeout=4) # do not delete this for it allows for subprocess to see output print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output if i == 1: print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it allows for subprocess to see output elif (args.action == "ds_data_5ghz"): logg.info("execute: wl -i wl1 bs_data") egg.sendline('wl -i wl1 bs_data') egg.expect([pexpect.TIMEOUT], timeout=4) # do not detete this for it allow for subprocess to read print(egg.before.decode('utf-8','ignore')) # do not delete this for it allows for subprocess to see output elif (args.action == "ds_data_24ghz"): logg.info("execute: wl -i wl0 bs_data") egg.sendline('wl -i
<filename>prep/prepare_bodymap.py # Prepare bodymap will parse labels from the FMA # - including terms likely to be found in social media from PyDictionary import PyDictionary # pip install PyDictionary from svgtools.generate import create_pointilism_svg from svgtools.utils import save_json from nlp import processText # nlp module from wordfish from glob import glob from time import sleep import pandas import pyproj # coordinate conversion import json import re # STEP 0: PREPARE BODYMAP #################################################################### png_image = "data/body.png" create_pointilism_svg(png_image,uid_base="bodymap", sample_rate=8,width=330,height=800, output_file="data/bodymappp.svg") # STEP 1: PREPARE DATA ####################################################################### files = glob("data/*.csv") fatalities = pandas.DataFrame(columns=["FISCAL_YEAR","SUMMARY_DATE","INCIDENT_DATE","COMPANY","DESCRIPTION"]) # Original headers for f in files: print "\n%s" %(f) fatcat = pandas.read_csv(f) print ",".join(fatcat.columns.tolist()) #FISCAL_YEAR,SUMMARY_DATE,INCIDENT_DATE,COMPANY,DESCRIPTION # data/FatalitiesFY11.csv # Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident # data/FatalitiesFY13.csv # Date of Incident,Company, City, State, ZIP,Preliminary Description of Incident,Fatality or Catastrophe # data/fatalitiesFY15.csv # Date of Incident,Company, City, State, ZIP,Victim(s),Preliminary Description of Incident,Fatality or Catastrophe,Inspection #,Unnamed: 6,Unnamed: 7,Unnamed: 8,Unnamed: 9,Unnamed: 10,Unnamed: 11,Unnamed: 12,Unnamed: 13,Unnamed: 14,Unnamed: 15,Unnamed: 16,Unnamed: 17,Unnamed: 18,Unnamed: 19 # data/FatalitiesFY09.csv # Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident # data/fatalitiesFY16.csv # Date of Incident ,Employer/Address of Incident ,Victim(s) ,Hazard Description ,Fatality or Catastrophe ,Inspection # # data/FatalitiesFY14.csv # Date of 
Incident,Company, City, State, ZIP,Preliminary Description of Incident,Fatality or Catastrophe,Unnamed: 4,Unnamed: 5,Unnamed: 6,Unnamed: 7,Unnamed: 8,Unnamed: 9,Unnamed: 10,Unnamed: 11,Unnamed: 12,Unnamed: 13,Unnamed: 14,Unnamed: 15,Unnamed: 16,Unnamed: 17 # data/FatalitiesFY12.csv # Fiscal Year ,Summary Report Date,Date of Incident,Preliminary Description of Incident,Unnamed: 4 # data/fatalitiesFY10.csv # Fiscal Year ,Summary Report Date,Date of Incident,Company,Preliminary Description of Incident for f in files: print "Adding file %s" %(f) fatcat = pandas.read_csv(f) # Generate index based on year match = re.search("[0-9]+",f) year = f[match.start():match.end()] rownames = ["%s_%s" %(year,x) for x in range(fatcat.shape[0])] fatcat.index = rownames shared_columns = [c for c in fatcat.columns if c in fatalities.columns] fatalities = fatalities.append(fatcat[shared_columns]) fatalities # [7852 rows x 5 columns] # We have one null date from 2016 - assign year 2016 fatalities.INCIDENT_DATE[fatalities["INCIDENT_DATE"].isnull()] = "01/01/2016" fatalities.to_csv("data/fatalities_all.tsv",sep="\t") # STEP 3: COORDINATE-IZE ##################################################################### # The company variable has the company name and location, we need to split it locations = [] companies = [] for row in fatalities.iterrows(): company = row[1].COMPANY locations.append("".join(company.split(',')[-2:]).strip()) companies.append("".join(company.split(',')[:2]).strip()) fatalities = fatalities.rename(index=str, columns={"COMPANY": "COMPANY_ORIGINAL"}) fatalities["LOCATION_RAW"] = locations fatalities["COMPANY"] = companies fatalities.to_csv("data/fatalities_all.tsv",sep="\t") # Replace weird latin characters normalized = [x.replace('\xa0', '') for x in fatalities["LOCATION_RAW"]] fatalities.LOCATION_RAW = normalized # https://pypi.python.org/pypi/geopy from geopy.geocoders import Nominatim geolocator = Nominatim() # Function to add an entry def 
add_entry(index,location,fatalities): fatalities.loc[index,"LOCATION"] = location.address fatalities.loc[index,"ALTITUDE"] = location.altitude fatalities.loc[index,"LATITUDE"] = location.latitude fatalities.loc[index,"LONGITUDE"] = location.longitude fatalities.loc[index,"LOCATION_IMPORTANCE"] = location.raw["importance"] return fatalities manual_inspection = [] for row in fatalities.iterrows(): index = row[0] address = row[1].LOCATION_RAW if row[1].LOCATION == "" and index not in manual_inspection: location = geolocator.geocode(address) sleep(0.5) if location != None: fatalities = add_entry(index,location,fatalities) else: print "Did not find %s" %(address) manual_inspection.append(index) # Function to normalize unicode to ascii, remove characters def normalize_locations(fatalities): locs=[] for fat in fatalities.LOCATION.tolist(): if isinstance(fat,float): locs.append("") elif isinstance(fat,unicode): locs.append(unicodedata.normalize("NFC",fat).encode('ASCII', 'ignore')) else: locs.append(fat) fatalities.LOCATION=locs return fatalities fatalities = normalize_locations(fatalities) fatalities.to_csv("data/fatalities_all.tsv",sep="\t") found = [] not_found = [] while len(manual_inspection) > 0: mi = manual_inspection.pop() row = fatalities.loc[mi] # Try finding the state, and keeping one word before it, adding comma address = row.LOCATION_RAW match = re.search("\s\w+\s[A-Z]{2}",address) wasfound = False if match!= None: address = address[match.start():].strip() location = geolocator.geocode(address) sleep(0.5) if location != None: print "FOUND %s" %(address) wasfound = True fatalities = add_entry(index,location,fatalities) # Save the address that was used fatalities.loc[index,"LOCATION_RAW"] = address found.append(mi) if wasfound == False: not_found.append(mi) manual_inspection = [x for x in manual_inspection if x not in found] fatalities = normalize_locations(fatalities) fatalities.to_csv("data/fatalities_all.tsv",sep="\t") # Try just using zip code - this might 
be best strategy found = [] not_found = [] while len(manual_inspection) > 0: mi = manual_inspection.pop() row = fatalities.loc[mi] # Try finding the state, and keeping one word before it, adding comma address = row.LOCATION_RAW match = re.search("[A-Z]{2}",address) wasfound = False if match!= None: address = address[match.start():].strip() location = geolocator.geocode(address) sleep(0.5) if location != None: print "FOUND %s" %(address) wasfound = True fatalities = add_entry(index,location,fatalities) # Save the address that was used fatalities.loc[index,"LOCATION_RAW"] = address found.append(mi) if wasfound == False: not_found.append(mi) fatalities = normalize_locations(fatalities) fatalities.to_csv("data/fatalities_all.tsv",sep="\t") # Manual work to find above # (reason failed) for mi in not_found: print 'fatalities.loc[%s,"LOCATION_RAW"] = "" #' %mi fatalities.loc[7680,"LOCATION_RAW"] = "FL 34945" # wrong zip code fatalities.loc[7666,"LOCATION_RAW"] = "TX 77351" # nearby town Leggett fatalities.loc[7623,"LOCATION_RAW"] = "TN 37868" # wrong zip code fatalities.loc[7581,"LOCATION_RAW"] = "MO 64836" # wrong zip code fatalities.loc[7579,"LOCATION_RAW"] = "MA 02108" # wrong zip code fatalities.loc[7577,"LOCATION_RAW"] = "IL 62701" # fatalities.loc[7561,"LOCATION_RAW"] = "TX 77541" # fatalities.loc[7546,"LOCATION_RAW"] = "IA 50644" # fatalities.loc[7541,"LOCATION_RAW"] = "ND 58201" # fatalities.loc[7521,"LOCATION_RAW"] = "UT 84078" # fatalities.loc[7479,"LOCATION_RAW"] = "TX 78836" # fatalities.loc[7335,"LOCATION_RAW"] = "ND 58601" # fatalities.loc[7232,"LOCATION_RAW"] = "WA 98003" # wrong zip code fatalities.loc[7185,"LOCATION_RAW"] = "TX 77001" # fatalities.loc[7182,"LOCATION_RAW"] = "TX 75956" # fatalities.loc[7148,"LOCATION_RAW"] = "MD 21201" # fatalities.loc[7060,"LOCATION_RAW"] = "TX 75766" # had name of center fatalities.loc[7053,"LOCATION_RAW"] = "OR 97503" # had name of department fatalities.loc[7027,"LOCATION_RAW"] = "IN 46806" # pizza shop! 
fatalities.loc[7024,"LOCATION_RAW"] = "TX 75560" # too much in address fatalities.loc[7013,"LOCATION_RAW"] = "TX 77662" # too much in address fatalities.loc[7005,"LOCATION_RAW"] = "AZ 85262" # "" fatalities.loc[6996,"LOCATION_RAW"] = "MD 20847" # wrong zip code fatalities.loc[6986,"LOCATION_RAW"] = "MN 55421" # fatalities.loc[6985,"LOCATION_RAW"] = "MA 02130" # city misspelled fatalities.loc[6890,"LOCATION_RAW"] = "TX 78401" # fatalities.loc[6887,"LOCATION_RAW"] = "IL 60415" # no address fatalities.loc[6809,"LOCATION_RAW"] = "WA 98101" # fatalities.loc[6804,"LOCATION_RAW"] = "TN 38478" # different sites mentioned fatalities.loc[6792,"LOCATION_RAW"] = "MN 55992" # different sites mentioned fatalities.loc[6716,"LOCATION_RAW"] = "IN 47901" # only company name fatalities.loc[6477,"LOCATION_RAW"] = "CA 95526" # fatalities.loc[6452,"LOCATION_RAW"] = "NM 87501" # fatalities.loc[6431,"LOCATION_RAW"] = "TX 79754" # fatalities.loc[6414,"LOCATION_RAW"] = "ME 04945" # wrong state! # this is conspicuous - reported twice, wrong state fatalities.loc[6412,"LOCATION_RAW"] = "ME 04945" # same fatalities.loc[6384,"LOCATION_RAW"] = "AK 72315" # fatalities.loc[6301,"LOCATION_RAW"] = "TX 79754" # this place has already been reported # Reeco Well Services and Joyce Fisher Limited Partnership fatalities.loc[6217,"LOCATION_RAW"] = "CA 92331" # fatalities.loc[6123,"LOCATION_RAW"] = "AR 72175" # fatalities.loc[5996,"LOCATION_RAW"] = "ND 58847" # fatalities.loc[5976,"LOCATION_RAW"] = "ND 58847" # fatalities.loc[5559,"LOCATION_RAW"] = "CA 95050" # fatalities.loc[5412,"LOCATION_RAW"] = "MS 39567" # fatalities.loc[5402,"LOCATION_RAW"] = "TX 77573" # fatalities.loc[5389,"LOCATION_RAW"] = "TX 78836" # second one in Catarina fatalities.loc[5354,"LOCATION_RAW"] = "TX 77840" # fatalities.loc[5238,"LOCATION_RAW"] = "WI 53705" # fatalities.loc[5020,"LOCATION_RAW"] = "TX 78021" # fatalities.loc[4932,"LOCATION_RAW"] = "AS 96799" # fatalities.loc[4761,"LOCATION_RAW"] = "OH 44101" # 
fatalities.loc[4631,"LOCATION_RAW"] = "KY 40502" # spelling error fatalities.loc[4546,"LOCATION_RAW"] = "CT 06840" # fatalities.loc[4436,"LOCATION_RAW"] = "TX 75421" # fatalities.loc[4395,"LOCATION_RAW"] = "MI 49201" # fatalities.loc[4320,"LOCATION_RAW"] = "IL 62640" # fatalities.loc[4251,"LOCATION_RAW"] = "CA 91722" # fatalities.loc[4140,"LOCATION_RAW"] = "KY 42440" # lowecase state letter fatalities.loc[4123,"LOCATION_RAW"] = "TX 79401" # fatalities.loc[3928,"LOCATION_RAW"] = "FL 33101" # fatalities.loc[3820,"LOCATION_RAW"] = "NM 97743" # fatalities.loc[3812,"LOCATION_RAW"] = "NM 87420" # wrong zip code fatalities.loc[3758,"LOCATION_RAW"] = "TX 75960" # fatalities.loc[3666,"LOCATION_RAW"] = "TX 78864" # fatalities.loc[3661,"LOCATION_RAW"] = "LA 70001" # fatalities.loc[3643,"LOCATION_RAW"] = "NY 11215" # fatalities.loc[3627,"LOCATION_RAW"] = "TX 77070" # fatalities.loc[3618,"LOCATION_RAW"] = "TX 75022" # fatalities.loc[3446,"LOCATION_RAW"] = "IN 46507" # wrong state fatalities.loc[3344,"LOCATION_RAW"] = "TX 77572" # fatalities.loc[3197,"LOCATION_RAW"] = "AZ 85206" # WalMart store number fatalities.loc[3133,"LOCATION_RAW"] = "NJ 09753" # fatalities.loc[2984,"LOCATION_RAW"] = "AK 99501" # fatalities.loc[2770,"LOCATION_RAW"] = "KY 42431" # wrong zip code fatalities.loc[2749,"LOCATION_RAW"] = "TX 78349" # fatalities.loc[2305,"LOCATION_RAW"] = "OK 73043" # fatalities.loc[2283,"LOCATION_RAW"] = "CA 95618" # zip for wrong state fatalities.loc[2280,"LOCATION_RAW"] = "WA 98036" # fatalities.loc[2226,"LOCATION_RAW"] = "FL 33178" # fatalities.loc[2058,"LOCATION_RAW"] = "MS 39701" # fatalities.loc[2032,"LOCATION_RAW"] = "OK 73660" # fatalities.loc[1980,"LOCATION_RAW"] = "WV 24931" # fatalities.loc[1962,"LOCATION_RAW"] = "CA 92501" # fatalities.loc[1959,"LOCATION_RAW"] = "TX 77571" # fatalities.loc[1915,"LOCATION_RAW"] = "IL 61748" # fatalities.loc[1898,"LOCATION_RAW"] = "WA 98660" # fatalities.loc[1873,"LOCATION_RAW"] = "TX 78201" # extra number in zip 
fatalities.loc[1863,"LOCATION_RAW"] = "TX 78353" # fatalities.loc[1635,"LOCATION_RAW"] = "AS 96799" # American Samoa? fatalities.loc[1492,"LOCATION_RAW"] = "AS 96799" # fatalities.loc[1477,"LOCATION_RAW"] = "TN 38340" # fatalities.loc[1406,"LOCATION_RAW"] = "TX 77501" # fatalities.loc[1335,"LOCATION_RAW"] = "TX 78353" # fatalities.loc[1224,"LOCATION_RAW"] = "CA 92879" # fatalities.loc[1065,"LOCATION_RAW"] = "OK 73030" # wrong zip code fatalities.loc[806,"LOCATION_RAW"] = "IA 52240" # fatalities.loc[618,"LOCATION_RAW"] = "CA 90401" # fatalities.loc[543,"LOCATION_RAW"] = "OK 73101" # fatalities.loc[509,"LOCATION_RAW"] = "TN 37738" # fatalities.loc[504,"LOCATION_RAW"] = "TX 78836" # fatalities.loc[453,"LOCATION_RAW"] = "FL 32899" # fatalities.loc[449,"LOCATION_RAW"] = "NY 11201" # fatalities.loc[445,"LOCATION_RAW"] = "IA 50701" # fatalities.loc[364,"LOCATION_RAW"] = "KY 41413" # fatalities.loc[318,"LOCATION_RAW"] = "TX 75029" # fatalities.loc[311,"LOCATION_RAW"] = "MA 02151" # fatalities.loc[182,"LOCATION_RAW"] = "KY 40201" # def search_locations(fatalities,not_found,found): for mi in not_found: row = fatalities.loc[mi] address = row.LOCATION_RAW location = geolocator.geocode(address) sleep(0.5) if location != None: print "FOUND %s" %(address) fatalities = add_entry(index,location,fatalities) # Save the address that was used fatalities.loc[index,"LOCATION_RAW"] = address found.append(mi) not_found = [x for x in not_found if x not in found] return fatalities,not_found,found fatalities,not_found,found = search_locations(fatalities,not_found,found) fatalities = normalize_locations(fatalities) fatalities.to_csv("data/fatalities_all.tsv",sep="\t") # One more round! Want to get these all mapped! 
# This time I will look up the company address for mi in not_found: print 'fatalities.loc[%s,"LOCATION_RAW"] = "" #' %mi fatalities.loc[7479,"LOCATION_RAW"] = "TX 78119" # fatalities.loc[6431,"LOCATION_RAW"] = "TX 79772" # nearby town, pecos TX fatalities.loc[6384,"LOCATION_RAW"] = "TX 76006" # fatalities.loc[6301,"LOCATION_RAW"] = "TX 79772" # fatalities.loc[5996,"LOCATION_RAW"] = "ND 58831" # fatalities.loc[5976,"LOCATION_RAW"] = "ND 58601" # fatalities.loc[5389,"LOCATION_RAW"] = "TX 78109" # fatalities.loc[5020,"LOCATION_RAW"] = "TX 78022" # fatalities.loc[4932,"LOCATION_RAW"] = "AS 96799"
        # NOTE(review): this chunk is the interior of a recurrent-network
        # class whose constructor header lies outside this view.  Code is
        # unchanged; only comments were added.
        self._cm_rnn_start_ind += len(fc_layers_pre)

        # We use odd numbers for actual layers and even number for all
        # context-mod layers.
        rem_cm_inds = range(2, 2*(len(fc_layers_pre)+len(rnn_layers)+\
            len(fc_layers))+1, 2)

        # One context-mod layer per recurrent layer, except that the last
        # recurrent layer is skipped when it acts as output layer and
        # outputs are not context-modulated.
        num_rec_cm_layers = len(rnn_layers)
        if has_rec_out_layer and not self._context_mod_outputs:
            num_rec_cm_layers -= 1
        self._num_rec_cm_layers = num_rec_cm_layers

        jj = 0
        # Add initial fully-connected context-mod layers.
        num_fc_pre_cm_layers = len(fc_layers_pre)
        self._num_fc_pre_cm_layers = num_fc_pre_cm_layers
        for i in range(num_fc_pre_cm_layers):
            cm_shapes.append([fc_layers_pre[i]])
            cm_layer_inds.append(rem_cm_inds[jj])
            jj += 1

        # Add recurrent context-mod layers.
        for i in range(num_rec_cm_layers):
            if context_mod_num_ts != -1:
                if context_mod_separate_layers_per_ts:
                    # One context-mod layer per timestep.
                    cm_rnn_shapes = [[rnn_layers[i]]] * context_mod_num_ts
                else:
                    # Only a single context-mod layer will be added, but we
                    # directly edit the correponding `param_shape` later.
                    assert self._context_mod_no_weights
                    cm_rnn_shapes = [[rnn_layers[i]]]
            else:
                cm_rnn_shapes = [[rnn_layers[i]]]

            cm_shapes.extend(cm_rnn_shapes)
            cm_layer_inds.extend([rem_cm_inds[jj]] * len(cm_rnn_shapes))
            jj += 1

        # Add final fully-connected context-mod layers.
        num_fc_cm_layers = len(fc_layers)
        if num_fc_cm_layers > 0 and not self._context_mod_outputs:
            num_fc_cm_layers -= 1
        self._num_fc_cm_layers = num_fc_cm_layers
        for i in range(num_fc_cm_layers):
            cm_shapes.append([fc_layers[i]])
            cm_layer_inds.append(rem_cm_inds[jj])
            jj += 1

        self._add_context_mod_layers(cm_shapes, cm_layers=cm_layer_inds)

        if context_mod_num_ts != -1 and not \
                context_mod_separate_layers_per_ts:
            # In this case, there is only one context-mod layer for each
            # recurrent layer, but we want to have separate weights per
            # timestep.
            # Hence, we adapt the expected parameter shape, such that we
            # get a different set of weights per timestep. This will be
            # split into multiple weights that are succesively fed into the
            # same layer inside the forward method.
            for i in range(num_rec_cm_layers):
                cmod_layer = \
                    self.context_mod_layers[self._cm_rnn_start_ind+i]
                # Prepend a timestep dimension to every parameter shape.
                cm_shapes_rnn = [[context_mod_num_ts, *s] for s in \
                    cmod_layer.param_shapes]

                # Offset of this layer's shapes inside `_param_shapes`.
                ps_ind = int(np.sum([ \
                    len(self.context_mod_layers[ii].param_shapes) \
                    for ii in range(self._cm_rnn_start_ind+i)]))
                self._param_shapes[ps_ind:ps_ind+len(cm_shapes_rnn)] = \
                    cm_shapes_rnn
                assert self._hyper_shapes_learned is not None
                self._hyper_shapes_learned[ \
                    ps_ind:ps_ind+len(cm_shapes_rnn)] = cm_shapes_rnn

        ########################
        ### Internal weights ###
        ########################

        prev_dim = self._n_in

        def define_fc_layer_weights(fc_layers, prev_dim, num_prev_layers):
            """Define the weights and shapes of the fully-connected layers.

            Args:
                fc_layers (list): The list of fully-connected layer
                    dimensions.
                prev_dim (int): The output size of the previous layer.
                num_prev_layers (int): The number of upstream layers to the
                    current one (a layer with its corresponding context-mod
                    layer(s) count as one layer). Count should start at
                    ``1``.

            Returns:
                (int): The output size of the last fully-connected layer
                considered here.
            """
            # FIXME We should instead build an MLP instance. But then we still
            # have to adapt all attributes accordingly.
            for i, n_fc in enumerate(fc_layers):
                s_w = [n_fc, prev_dim]
                s_b = [n_fc] if self._has_bias else None

                # j == 0 -> weight matrix, j == 1 -> bias vector.
                for j, s in enumerate([s_w, s_b]):
                    if s is None:
                        continue
                    is_bias = True
                    if j % 2 == 0:
                        is_bias = False

                    if not self._no_weights:
                        self._weights.append(nn.Parameter(torch.Tensor(*s),
                                                          requires_grad=True))
                        if is_bias:
                            self._layer_bias_vectors.append(self._weights[-1])
                        else:
                            self._layer_weight_tensors.append(self._weights[-1])
                    else:
                        # Weights are produced externally (e.g., by a
                        # hypernetwork); only record their shapes.
                        self._hyper_shapes_learned.append(s)
                        self._hyper_shapes_learned_ref.append( \
                            len(self.param_shapes))

                    self._param_shapes.append(s)
                    self._param_shapes_meta.append({
                        'name': 'bias' if is_bias else 'weight',
                        'index': -1 if self._no_weights else \
                            len(self._weights)-1,
                        'layer': i * 2 + num_prev_layers, # Odd numbers
                    })

                prev_dim = n_fc

            return prev_dim

        ### Initial fully-connected layers.
        prev_dim = define_fc_layer_weights(self._fc_layers_pre, prev_dim, 1)

        ### Recurrent layers.
        # LSTM cells need 4 stacked weight blocks (input/forget/cell/output
        # gates), hence the factor 4.
        coeff = 4 if self._use_lstm else 1
        for i, n_rec in enumerate(self._rnn_layers):
            # Input-to-hidden
            s_w_ih = [n_rec*coeff, prev_dim]
            s_b_ih = [n_rec*coeff] if use_bias else None
            # Hidden-to-hidden
            s_w_hh = [n_rec*coeff, n_rec]
            s_b_hh = [n_rec*coeff] if use_bias else None
            # Hidden-to-output.
            # Note, for an LSTM cell, the hidden state vector is also the
            # output vector.
            if not self._use_lstm:
                s_w_ho = [n_rec, n_rec]
                s_b_ho = [n_rec] if use_bias else None
            else:
                s_w_ho = None
                s_b_ho = None

            # Even j -> weight matrix, odd j -> bias vector.
            for j, s in enumerate([s_w_ih, s_b_ih, s_w_hh, s_b_hh, s_w_ho,
                                   s_b_ho]):
                if s is None:
                    continue
                is_bias = True
                if j % 2 == 0:
                    is_bias = False
                wtype = 'ih'
                if 2 <= j < 4:
                    wtype = 'hh'
                elif j >=4:
                    wtype = 'ho'

                if not no_weights:
                    self._weights.append(nn.Parameter(torch.Tensor(*s),
                                                      requires_grad=True))
                    if is_bias:
                        self._layer_bias_vectors.append(self._weights[-1])
                    else:
                        self._layer_weight_tensors.append(self._weights[-1])
                else:
                    self._hyper_shapes_learned.append(s)
                    self._hyper_shapes_learned_ref.append( \
                        len(self.param_shapes))

                self._param_shapes.append(s)
                self._param_shapes_meta.append({
                    'name': 'bias' if is_bias else 'weight',
                    'index': -1 if no_weights else len(self._weights)-1,
                    'layer': i * 2 + 1 + 2 * len(fc_layers_pre), # Odd numbers
                    'info': wtype
                })

            prev_dim = n_rec

        ### Fully-connected layers.
        prev_dim = define_fc_layer_weights(self._fc_layers, prev_dim, \
            1 + 2 * len(fc_layers_pre) + 2 * len(rnn_layers))

        ### Initialize weights.
        if init_weights is not None:
            # Caller supplied explicit initial values; shapes must match.
            assert self._weights is not None
            assert len(init_weights) == len(self.weights)
            for i in range(len(init_weights)):
                assert np.all(np.equal(list(init_weights[i].shape),
                                       self.weights[i].shape))
                self.weights[i].data = init_weights[i]
        else:
            rec_start = len(fc_layers_pre)
            rec_end = rec_start + len(rnn_layers) * (2 if use_lstm else 3)
            # Note, Pytorch applies a uniform init to its recurrent layers, as
            # defined here:
            # https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py#L155
            for i in range(len(self._layer_weight_tensors)):
                if i >=rec_start and i < rec_end:
                    # Recurrent layer weights.
                    if kaiming_rnn_init:
                        init_params(self._layer_weight_tensors[i],
                            self._layer_bias_vectors[i] if use_bias else None)
                    else:
                        # PyTorch-style uniform init in [-1/sqrt(n), 1/sqrt(n)].
                        a = 1.0 / math.sqrt(rnn_layers[(i-rec_start) // \
                            (2 if use_lstm else 3)])
                        nn.init.uniform_(self._layer_weight_tensors[i], -a, a)
                        if use_bias:
                            nn.init.uniform_(self._layer_bias_vectors[i],
                                             -a, a)
                else:
                    # FC layer weights.
                    init_params(self._layer_weight_tensors[i],
                        self._layer_bias_vectors[i] if use_bias else None)

        num_weights = MainNetInterface.shapes_to_num_weights(self._param_shapes)
        if verbose:
            if self._use_context_mod:
                cm_num_weights = \
                    MainNetInterface.shapes_to_num_weights(cm_shapes)

            print('Creating a simple RNN with %d weights' % num_weights
                  + (' (including %d weights associated with-' % cm_num_weights
                     + 'context modulation)' if self._use_context_mod else '')
                  + '.')

        self._is_properly_setup()

    @property
    def bptt_depth(self):
        """Getter for attribute :attr:`bptt_depth`."""
        return self._bptt_depth

    @bptt_depth.setter
    def bptt_depth(self, value):
        """Setter for attribute :attr:`bptt_depth`."""
        self._bptt_depth = value

    @property
    def num_rec_layers(self):
        """Getter for read-only attribute :attr:`num_rec_layers`."""
        return len(self._rnn_layers)

    @property
    def use_lstm(self):
        """Getter for read-only attribute :attr:`use_lstm`."""
        return self._use_lstm

    def split_cm_weights(self, cm_weights, condition, num_ts=0):
        """Split context-mod weights per context-mod layer.

        Args:
            cm_weights (torch.Tensor): All context modulation weights.
            condition (optional, int): If provided, then this argument will be
                passed as argument ``ckpt_id`` to the method
                :meth:`utils.context_mod_layer.ContextModLayer.forward`.
            num_ts (int): The length of the sequences.

        Returns:
            (Tuple): Where the tuple contains:

            - **cm_inputs_weights**: The cm input weights.
            - **cm_fc_pre_layer_weights**: The cm pre-recurrent weights.
            - **cm_rec_layer_weights**: The cm recurrent weights.
            - **cm_fc_layer_weights**: The cm post-recurrent weights.
            - **n_cm_rec**: The number of recurrent cm layers.
            - **cmod_cond**: The context-mod condition.
        """
        n_cm_rec = -1
        cm_fc_pre_layer_weights = None
        cm_fc_layer_weights = None
        cm_inputs_weights = None
        cm_rec_layer_weights = None
        if cm_weights is not None:
            if self._context_mod_num_ts != -1 and \
                    self._context_mod_separate_layers_per_ts:
                assert num_ts <= self._context_mod_num_ts

            # Note, an mnet layer might contain multiple context-mod layers
            # (a recurrent layer can have a separate context-mod layer per
            # timestep).
            cm_fc_pre_layer_weights = []
            cm_rec_layer_weights = [[] for _ in
                                    range(self._num_rec_cm_layers)]
            cm_fc_layer_weights = []

            # Number of cm-layers per recurrent layer.
            n_cm_per_rec = self._context_mod_num_ts if \
                self._context_mod_num_ts != -1 and \
                    self._context_mod_separate_layers_per_ts else 1
            n_cm_rec = n_cm_per_rec * self._num_rec_cm_layers

            # Walk the flat weight list and slice it per context-mod layer.
            cm_start = 0
            for i, cm_layer in enumerate(self.context_mod_layers):
                cm_end = cm_start + len(cm_layer.param_shapes)

                if i == 0 and self._context_mod_inputs:
                    cm_inputs_weights = cm_weights[cm_start:cm_end]
                elif i < self._cm_rnn_start_ind:
                    cm_fc_pre_layer_weights.append(cm_weights[cm_start:cm_end])
                elif i >= self._cm_rnn_start_ind and \
                        i < self._cm_rnn_start_ind + n_cm_rec:
                    # Index of recurrent layer.
                    i_r = (i-self._cm_rnn_start_ind) // n_cm_per_rec
                    cm_rec_layer_weights[i_r].append( \
                        cm_weights[cm_start:cm_end])
                else:
                    cm_fc_layer_weights.append(cm_weights[cm_start:cm_end])
                cm_start = cm_end

            # We need to split the context-mod weights in the following case,
            # as they are currently just stacked on top of each other.
            if self._context_mod_num_ts != -1 and \
                    not self._context_mod_separate_layers_per_ts:
                for i, cm_w_list in enumerate(cm_rec_layer_weights):
                    assert len(cm_w_list) == 1
                    cm_rnn_weights = cm_w_list[0]

                    cm_rnn_layer = self.context_mod_layers[ \
                        self._cm_rnn_start_ind+i]
                    assert len(cm_rnn_weights) == len(cm_rnn_layer.param_shapes)

                    # The first dimension are the weights of this layer per
                    # timestep.
                    num_ts_cm = -1
                    for j, s in enumerate(cm_rnn_layer.param_shapes):
                        assert len(cm_rnn_weights[j].shape) == len(s) + 1
                        if j == 0:
                            num_ts_cm = cm_rnn_weights[j].shape[0]
                        else:
                            assert num_ts_cm == cm_rnn_weights[j].shape[0]
                    assert num_ts <= num_ts_cm

                    # Split each stacked tensor into per-timestep chunks.
                    cm_w_chunked = [None] * len(cm_rnn_weights)
                    for j, cm_w in enumerate(cm_rnn_weights):
                        cm_w_chunked[j] = torch.chunk(cm_w, num_ts_cm, dim=0)

                    # Now we gather all these chunks to assemble the weights
                    # needed per timestep (as if
                    # `_context_mod_separate_layers_per_t` were True).
                    cm_w_list = []
                    for j in range(num_ts_cm):
                        tmp_list = []
                        for chunk in cm_w_chunked:
                            tmp_list.append(chunk[j].squeeze(dim=0))
                        cm_w_list.append(tmp_list)
                    cm_rec_layer_weights[i] = cm_w_list

            # Note, the last layer does not necessarily have context-mod
            # (depending on `self._context_mod_outputs`).
            if len(cm_rec_layer_weights) < len(self._rnn_layers):
                cm_rec_layer_weights.append(None)
            if len(cm_fc_layer_weights) < len(self._fc_layers):
                cm_fc_layer_weights.append(None)

        #######################
        ### Parse condition ###
        #######################
        cmod_cond = None
        if condition is not None:
            assert isinstance(condition, int)
            cmod_cond = condition

            # Note, the cm layer will ignore
print(tcolors.ERROR + "ERROR: could not checkout ip '%s' at %s." % (ip['name'], ip['commit']) + tcolors.ENDC) errors.append("%s - Could not checkout commit %s" % (ip['name'], ip['commit'])); continue os.chdir(cwd) print('\n\n') print(tcolors.WARNING + "SUMMARY" + tcolors.ENDC) if len(errors) == 0: print(tcolors.OK + "IPs updated successfully!" + tcolors.ENDC) else: for error in errors: print(tcolors.ERROR + ' %s' % (error) + tcolors.ENDC) print() print(tcolors.ERROR + "ERRORS during IP update!" + tcolors.ENDC) sys.exit(1) os.chdir(owd) def flatten_ips(self, origin='origin', squash=False, dry_run=False): """Merges in all IPs as subtrees into this repository. The result is a flattened repository with a merged history of all IPs' histories. This is manually reversible. :param origin: The GIT remote to be used (by default 'origin') :type origin: str :param squash: If true, squash the IPs' history before flattening (merging) them. :type squash: bool :param dry_run: If true, just pretend to flatten. Useful for seeing what commands are being run. :type dry_run: bool """ errors = [] ips = self.ip_list git = "git" # make sure we are in the correct directory to start owd = os.getcwd() os.chdir(self.ips_dir) cwd = os.getcwd() for ip in ips: # check if path is SITE_DEPENDENT, in that case skip it if ip['path'][:20] == "$SITE_DEPENDENT_PATH": continue os.chdir(cwd) # check if directory already exists, this hints to the fact that we probably already cloned it if os.path.isdir("./%s" % ip['path']): errors.append("""%s - %s: exists already. git subtree only works when the path is not yet existing""" % (ip['name'], ip['path'])); # Not yet cloned, so we have to do that first else: os.chdir(owd) print(tcolors.OK + "\nFlattening IP '%s'..." 
% ip['name'] + tcolors.ENDC) # compose remote name server = ip['server'] if ip['server'] is not None else self.default_server group = ip['group'] if ip['group'] is not None else self.default_group if server[:5] == "https" or server[:6] == "git://": ip['remote'] = "%s/%s" % (server, group) else: ip['remote'] = "%s:%s" % (server, group) flatten_cmd = ("%s subtree add --prefix ips/%s%s %s/%s.git %s" % (git, ip['path'], ' --squash' if squash else '', ip['remote'], ip['name'], ip['commit'])) print(flatten_cmd) ret = 0 if not(dry_run): ret = execute(flatten_cmd) if ret != 0: print(tcolors.ERROR + """ERROR: could not git subtree, the remote probably doesn't exist OR is not reachable. You can try to refer to tags. You could also try to to remove the '%s' directory.""" % ip['name'] + tcolors.ENDC) errors.append("%s - Could not git subtree" % (ip['name'])); continue os.chdir(cwd) print('\n\n') print(tcolors.WARNING + "SUMMARY" + tcolors.ENDC) if len(errors) == 0: print(tcolors.OK + "IPs flattened (merged) successfully!!" + tcolors.ENDC) else: for error in errors: print(tcolors.ERROR + ' %s' % (error) + tcolors.ENDC) print() print(tcolors.ERROR + "ERRORS during IP flattening!" + tcolors.ENDC) sys.exit(1) os.chdir(owd) def delete_tag_ips(self, tag_name): """Deletes a tag for all IPs. :param tag_name: The tag to be removed :type tag_name: str This function removes a tag to all IPs (no safety checks). """ cwd = os.getcwd() ips = self.ip_list new_ips = [] for ip in ips: os.chdir("%s/%s" % (self.ips_dir, ip['path'])) ret = execute("git tag -d %s" % tag_name) os.chdir(cwd) def push_tag_ips(self, tag_name=None): """Pushes a tag for all IPs. :param tag_name: If not None, the name of the tag - else, the latest tag is pushed. :type tag_name: str or None Pushes the latest tagged version, or a specific tag, for all IPs. 
""" cwd = os.getcwd() ips = self.ip_list new_ips = [] for ip in ips: os.chdir("%s/%s" % (self.ips_dir, ip['path'])) if tag_name == None: newest_tag = execute_popen("git describe --tags --abbrev=0", silent=True).communicate() try: newest_tag = newest_tag[0].split() newest_tag = newest_tag[0] except IndexError: pass else: newest_tag = tag_name ret = execute("git push origin tags/%s" % newest_tag) os.chdir(cwd) # def push_ips(self, remote_name, remote): # cwd = os.getcwd() # ips = self.ip_list # new_ips = [] # for ip in ips: # os.chdir("%s/%s" % (self.ips_dir, ip['path'])) # ret = execute("git remote add %s %s/%s.git" % (remote_name, remote, ip['name'])) # ret = execute("git push %s master" % remote_name) # os.chdir(cwd) def tag_ips(self, tag_name, changes_severity='warning', tag_always=False, store=False): """Tags all IPs. :param tag_name: The name of the tag :type tag_name: str :param changes_severity: 'warning' or 'error' :type changes_severity: str :param tag_always: If True, tag even if an identical tag already exists :type tag_always: bool This function checks the newest tag, staged and unstaged changes; if it found changes it throws a warning or dies depending on the `changes_severity` setting. If no identical tag exists or `tag_always` is set to True, the current HEAD of the IP will be tagged with the given `tag_name`. """ cwd = os.getcwd() ips = self.ip_list new_ips = [] for ip in ips: os.chdir("%s/%s" % (self.ips_dir, ip['path'])) newest_tag, err = execute_popen("git describe --tags --abbrev=0", silent=True).communicate() unstaged_changes, err = execute_popen("git diff --name-only").communicate() staged_changes, err = execute_popen("git diff --cached --name-only").communicate() if staged_changes.split("\n")[0] != "": if changes_severity == 'warning': print(tcolors.WARNING + "WARNING: skipping ip '%s' as it has changes staged for commit." 
% ip['name'] + tcolors.ENDC + "\nSolve, commit and " + tcolors.BLUE + "git tag %s" % tag_name + tcolors.ENDC + " manually.") os.chdir(cwd) continue else: print(tcolors.ERROR + "ERROR: ip '%s' has changes staged for commit." % ip['name'] + tcolors.ENDC + "\nSolve and commit before trying to auto-tag.") sys.exit(1) if unstaged_changes.split("\n")[0] != "": if changes_severity == 'warning': print(tcolors.WARNING + "WARNING: skipping ip '%s' as it has unstaged changes." % ip['name'] + tcolors.ENDC + "\nSolve, commit and " + tcolors.BLUE + "git tag %s" % tag_name + tcolors.ENDC + " manually.") os.chdir(cwd) continue else: print(tcolors.ERROR + "ERROR: ip '%s' has unstaged changes." % ip['name'] + tcolors.ENDC + "\nSolve and commit before trying to auto-tag.") sys.exit(1) if newest_tag != "": output, err = execute_popen("git diff --name-only tags/%s" % newest_tag).communicate() else: output = "" if output.decode().split("\n")[0] != "" or newest_tag=="" or tag_always: ret = execute("git tag %s" % tag_name) if ret != 0: print(tcolors.WARNING + "WARNING: could not tag ip '%s', probably the tag already exists." % (ip['name']) + tcolors.ENDC) else: print("Tagged ip " + tcolors.WARNING + "'%s'" % ip['name'] + tcolors.ENDC + " with tag %s." % tag_name) newest_tag = tag_name try: newest_tag = newest_tag.split()[0] except IndexError: pass new_ips.append({'name': ip['name'], 'path': ip['path'], 'server': ip['server'], 'domain': ip['domain'], 'alternatives': ip['alternatives'], 'group': ip['group'], 'commit': "tags/%s" % newest_tag}) os.chdir(cwd) if store: store_ips_list("new_ips_list.yml", new_ips) def get_latest_ips(self, changes_severity='warning', new_ips_list='new_ips_list.yml'): """Collects current versions for all IPs. 
:param tag_name: The name of the tag :type tag_name: str :param changes_severity: 'warning' or 'error' :type changes_severity: str :param new_ips_ist: Name of the new `ips_list.yml` file (defaults to `new_ips_list.yml`) :type new_ips_ist: str This function collects the latest version of all IPs from the local repo and stores it in a new `ips_list.yml` file. If there are changes (staged or unstaged) it will throw a warning, or die if `changes_severity` is set to 'error'. """ cwd = os.getcwd() ips = self.ip_list new_ips = [] for ip in ips: os.chdir("%s/%s" % (self.ips_dir, ip['path'])) #commit, err = execute_popen("git checkout master", silent=True).communicate() #commit, err = execute_popen("git pull", silent=True).communicate() #commit, err = execute_popen("git log -n 1 --format=format:%H", silent=True).communicate() commit, err = execute_popen("git describe --tags --always", silent=True).communicate() unstaged_changes, err = execute_popen("git diff --name-only").communicate() staged_changes, err = execute_popen("git diff --cached --name-only").communicate() if staged_changes.decode().split("\n")[0] != "": if changes_severity == 'warning': print(tcolors.WARNING + "WARNING: skipping ip '%s' as it has changes staged for commit." % ip['name'] + tcolors.ENDC + "\nSolve and commit manually.") os.chdir(cwd) continue else: print(tcolors.ERROR + "ERROR: ip '%s' has changes staged for commit." % ip['name'] + tcolors.ENDC + "\nSolve and commit before trying to get latest version.") sys.exit(1) if unstaged_changes.decode().split("\n")[0] != "": if changes_severity == 'warning': print(tcolors.WARNING + "WARNING: skipping ip '%s' as it has unstaged changes." % ip['name'] + tcolors.ENDC + "\nSolve and commit manually.") os.chdir(cwd) continue else: print(tcolors.ERROR + "ERROR: ip '%s' has unstaged changes." 
% ip['name'] + tcolors.ENDC + "\nSolve and commit before trying to get latest version.") sys.exit(1) new_ips.append({'name': ip['name'], 'path': ip['path'], 'server': ip['server'], 'domain': ip['domain'], 'alternatives': ip['alternatives'], 'group': ip['group'], 'commit': "%s" % commit.decode().rstrip()}) os.chdir(cwd) store_ips_list(new_ips_list, new_ips) def export_make(self, abs_path="$(IP_PATH)", script_path="./", more_opts="", source='ips', target_tech=None, local=False, simulator='vsim', library_name="ncsim_libs"): """Exports
import winreg from os import scandir, makedirs, getenv from re import sub, compile, escape from textwrap import fill from time import sleep from urllib.parse import urlencode import inquirer import malclient import pyloader from bs4 import BeautifulSoup from msedge.selenium_tools import Edge, EdgeOptions from tabulate import tabulate from urllib3 import PoolManager import selenium_installer class ahframework: def __init__(self): self.client = malclient.Client() self.client.init( refresh_token="def502007b613dc7114efb6b2e0dc50593a060fbfedda409cea33a2eb409824edc6c95a1e1d5c6c7c0eeb40e7f5ec3f8b9fe1a249248b7a73b352efa1526ad20405b2611b1608ac1f1cd60d7e8445e0879fa35928a90b5b82129f8a6360e212e84cf7b9ef4aed3f4387e708b1e5c5f83dc43c9e43aa8e0dfd24d4e6c9a2559b929b73af4bed8499c2255f22b130ee491ccd0212b3f14505b5c25624dded5b72c71427540b0b08b8b2d696470ac83f4b48db6053b2a74cc757e53c37fa6b16d73049572c69c09012d9687208a9c1ff9f91d2f34e46d1e376ba97d34834db68a6e6aa1c4adf28f37d2e7f305c3b4b54010309119a32f6a55d56afb8b751210ecce2a667f45cd8750caf6d506167c220bf97eb35ffd3c2f9d9a4011819968892be95905e678ecffc34e42ccce8727198179c84e19a054b92b33cd1553d1c281e020e69c298a25912f1b9b697fe43bd81e9008c88d53d17206f73001a7b5b0212194f2ee00e8f46ca49d8752e9749cdfcdfb17c1315fea22edf0f04e117ccabbe3322851e34510d35dfa1e") self.auth = self.client.refresh_bearer_token( client_secret='', client_id="421a09b495c9559a458eb06c8c5f41c1", 
refresh_token="def502007b613dc7114efb6b2e0dc50593a060fbfedda409cea33a2eb409824edc6c95a1e1d5c6c7c0eeb40e7f5ec3f8b9fe1a249248b7a73b352efa1526ad20405b2611b1608ac1f1cd60d7e8445e0879fa35928a90b5b82129f8a6360e212e84cf7b9ef4aed3f4387e708b1e5c5f83dc43c9e43aa8e0dfd24d4e6c9a2559b929b73af4bed8499c2255f22b130ee491ccd0212b3f14505b5c25624dded5b72c71427540b0b08b8b2d696470ac83f4b48db6053b2a74cc757e53c37fa6b16d73049572c69c09012d9687208a9c1ff9f91d2f34e46d1e376ba97d34834db68a6e6aa1c4adf28f37d2e7f305c3b4b54010309119a32f6a55d56afb8b751210ecce2a667f45cd8750caf6d506167c220bf97eb35ffd3c2f9d9a4011819968892be95905e678ecffc34e42ccce8727198179c84e19a054b92b33cd1553d1c281e020e69c298a25912f1b9b697fe43bd81e9008c88d53d17206f73001a7b5b0212194f2ee00e8f46ca49d8752e9749cdfcdfb17c1315fea22edf0f04e117ccabbe3322851e34510d35dfa1e") self._usr_inp = None self._sel_ani = None self._anime_search_result = None self._anime_search_result_string = None self.title = None self.alt_title = None self.year_aired = None self.score = None self.score_count = None self.media = None self.status = None self.genres = None self.episodes = None self.synopsis = None self.my_list = None self.query_title = None self.file_title = None self.gogoanime = False def downloader(self, mode): def progress_callback(progress): print(f'\rDownloading File: {progress.dlable.file_name} Progress: ' + '{0:.2f}%'.format( progress.percent), end='') # `return True` if the download should be canceled return False loader = pyloader.Loader.get_loader() loader.configure( max_concurrent=1, progress_cb=progress_callback, update_interval=3, daemon=False ) loader.start() options = EdgeOptions() options.use_chromium = True options.headless = False options.add_argument(f"--user-data-dir={getenv('LOCALAPPDATA')}\\Microsoft\\Edge\\Generated Data") options.add_argument("--profile-directory=Default") options.add_argument("--disable-extensions") options.add_argument("--headless") options.add_argument('--log-level=3') driver = Edge(options=options) _custom_episode 
= int() _queue = list() with winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key: download_location = winreg.QueryValueEx(key, '{374DE290-123F-4565-9164-39C4925E467B}')[0] if mode == 'Single': while _custom_episode == 0: try: _custom_episode = int(input(f'[+] Which episode you wish to download? (1-{self.episodes}): ')) except ValueError: print(f'[!] Please enter a valid episode (1 - {self.episodes}') else: _file_list = scandir(download_location + '/Downloader') try: makedirs(f'{download_location}\\Downloader\\{self.file_title}') _queue = [ep for ep in list(range(1, self.episodes + 1))] except FileExistsError: _queue = [ep for ep in list(range(1, self.episodes + 1)) if ep not in ([int(((str(entry)).split(' '))[-1].split('.')[0]) for entry in (scandir( download_location + '/Downloader/' + self.file_title))])] # _queue = [ep for ep in list(range(1, self.episodes + 1)) if ep not in [int((list(str( # ep.name).split(' '))[-1].split('.'))[0]) for ep in scandir(download_location + '/Downloader/' + # self.file_title)]] if len(_queue) == 0: print('[+] File already exists without missing an episode. Please look for a new series!') mainmenu() print('[+] Found existing folder in the directory, checking for episodes ... 
') print(f'[+] Downloading missing episodes: {", ".join(str(v) for v in _queue)}.') def get_files(url, episode): print(fill( f'\n\n[+] Getting download link from {url} for {self.title} Episode {episode}/{self.episodes}', 80)) driver.get(url) try: driver.find_element_by_css_selector("#main > div:nth-child(7) > div").click() window_after = driver.window_handles[1] driver.close() driver.switch_to.window(window_after) _download_link = driver.find_element_by_css_selector( '#main > div > div.content_c > div > div:nth-child(5) > div:nth-child(3) > a').get_attribute('href') target = pyloader.DLable(url=_download_link, target_dir=f'{download_location}\\Downloader\\{self.file_title}', file_name=f'{self.file_title} Episode {str(episode)}.mp4') loader.download(target) except: print('[+] Ouch, I missed the download button. Care to try again?') self.aninfo() while loader.is_active(): sleep(2) def get_files_gogo(url, episode): print(fill( f'\n\n[+] Getting download link from {url} for {self.title} Episode {episode}/{self.episodes}', 80)) driver.get(url) try: driver.find_element_by_xpath( '#wrapper_bg > section > section.content_left > div:nth-child(1) > div.anime_video_body > ' 'div.anime_video_body_cate > div.favorites_book > ul > li.dowloads > a').click() window_after = driver.window_handles[1] driver.close() driver.switch_to.window(window_after) _download_link = driver.find_element_by_css_selector( '#main > div > div.content_c > div > div:nth-child(5) > div:nth-child(3) > a').get_attribute('href') target = pyloader.DLable(url=_download_link, target_dir=f'{download_location}\\Downloader\\{self.file_title}', file_name=f'{self.file_title} Episode {str(episode)}.mp4') loader.download(target) except: print('[+] Ouch, I missed the download button. 
Care to try again?') self.aninfo() while loader.is_active(): sleep(2) if _custom_episode != 0 and not self.gogoanime: link = ('https://animekisa.tv/' + (str(self.query_title).split('/'))[-1] + '-episode-' + str( _custom_episode)) get_files(link, _custom_episode) print('[+] Download complete!\n') mainmenu() elif _custom_episode == 0 and not self.gogoanime: link = [('https://animekisa.tv/' + (str(self.query_title).split('/'))[-1] + '-episode-' + str(x)) for x in _queue] _index = int(_queue[0]) for _item in link: get_files(_item, _index) print() _index += 1 print('[+] Download complete!\n') mainmenu() elif _custom_episode != 0 and self.gogoanime: link = ('https://gogoanime.pe/' + (str(self.query_title).split('/'))[-1] + '-episode-' + str( _custom_episode)) get_files_gogo(link, _custom_episode) print('[+] Download complete!\n') mainmenu() elif _custom_episode == 0 and self.gogoanime: link = [('https://gogoanime.pe/' + (str(self.query_title).split('/'))[-1] + '-episode-' + str(x)) for x in _queue] _index = int(_queue[0]) for _item in link: get_files(_item, _index) print() _index += 1 print('[+] Download complete!\n') mainmenu() def aninfo(self): while True: self._usr_inp = input('[+] Search Anime: ') if len(self._usr_inp) == 0: print('[!] 
Goodbye!') sleep(0.3) mainmenu() else: self._anime_search_result = self.client.search_anime(self._usr_inp) for _index, _anime in enumerate(self._anime_search_result): del self._anime_search_result[_index + 1] self._anime_search_result_string = [fill(_anime.title, 80) for _anime in self._anime_search_result] self._anime_search_result_string.append('[CANCEL]') self._anime_search_result_string.append('[CHANGE SEARCH ENGINE]') _anime_selected = inquirer.prompt([inquirer.List('selected anime', message="Which one?", choices=list(self._anime_search_result_string))])[ 'selected anime'] if _anime_selected == '[CANCEL]': mainmenu() elif _anime_selected == '[CHANGE SEARCH ENGINE]': html = PoolManager() _search_engine = inquirer.prompt([inquirer.List('selected engine', message="Which anime provider you'd like to use?", choices=['MyAnimeList', 'AnimeKisa', 'GogoAnime'])])['selected engine'] print(f'[+] Search engine {_search_engine} selected!') if _search_engine == 'MyAnimeList': pass elif _search_engine == 'AnimeKisa': print('[+] Changed search engine to AnimeKisa!\n') _title_name = None _initial_search = html.request('GET', 'https://animekisa.tv/search?q=' + str( (urlencode({'q': f'{self._usr_inp}'}).split('='))[-1])) _initial_search_result = ( BeautifulSoup(_initial_search.data, features='html.parser')).select( '.lisbox22 .similarbox .centered div') if _initial_search.status != 200: print( f'[!] AnimeKisa error code {_initial_search.status}! Please try again in several ' f'minutes!') mainmenu() elif len(_initial_search_result) == 0: print('[!] Could not find anything. Please try again. 
') self.aninfo() else: _title_name = (inquirer.prompt([inquirer.List('Selected', message="Which one?", choices=[_item.text.replace('\n', '') for _index, _item in enumerate(_initial_search_result) if _index % 2 == 0])]))[ 'Selected'] if list(_title_name)[-1] == ' ': _title_name = _title_name[:-1] if list(_title_name)[0] == ' ': _title_name = _title_name[0:] rep = {" ": "-", ":": "", "’": "-", "?": "", "!": "", ".": "", "/": "-", '★': '', '%': '', '+': '', '=': '', '³': '-'} rep = dict((escape(k), v) for k, v in rep.items()) pattern = compile("|".join(rep.keys())) self.query_title = pattern.sub(lambda m: rep[escape(m.group(0))], _title_name.casefold()) _anime_info = BeautifulSoup( (html.request('GET', f'https://animekisa.tv/{self.query_title}')).data, features='html.parser') _anime_details = [("".join(c.find_all(text=True))) for c in _anime_info.find_all('div', {'class': 'textc'}, text=True)] _anime_details.append( str(_anime_info.find('div', {'class': 'infodes2'}).getText()).replace('\'', '’')) self.genres = [("".join(g.find_all(text=True))) for g in _anime_info.find_all('a', {'class': 'infoan'}) if not g.has_attr('target')] if len(_anime_details) == 3: _anime_details.insert(0, _title_name) self.alt_title = _anime_details[0] self.status = _anime_details[1] self.episodes = int(_anime_details[2]) self.synopsis = _anime_details[3] self.title = _title_name if self.episodes == '?': self.episodes = int(_anime_info.find('div', {'class': 'infoept2'}).getText()) elif _search_engine == 'GogoAnime': print('[+] Changed search engine to GogoAnime\n') self.gogoanime = True _initial_requests = html.request('GET', 'https://gogoanime.pe//search.html?keyword=' + str( (urlencode({'q': f'{self._usr_inp}'}).split('='))[-1])) if _initial_requests.status != 200: print( f'[!] Gogoanime error code {_initial_requests.status}! 
Please try again in ' f'several minutes!') quit() _search_result = BeautifulSoup(_initial_requests.data, features='html.parser') _search_result = _search_result.select('.last_episodes .items .name a') _title_list = [] _href_list = [] _title_link = '' _title_name = '' if len(_search_result) == 0: print('[!] Could not find anything. Please try again!') self.aninfo() else: for _index, _item in enumerate(_search_result): _title_list.append(_item.text) _href_list.append(_item['href']) _title_list.append('Cancel') selection = [inquirer.List('Selected', message="Which one?", choices=_title_list)] _title_name = (inquirer.prompt(selection))['Selected'] if _title_name == 'Cancel': mainmenu() _index_bridge = int(_title_list.index(f"{_title_name}")) _title_link = _href_list[_index_bridge] _details_result = html.request('GET', 'https://gogoanime.pe/' + _title_link) _details_result = BeautifulSoup(_details_result.data, features='html.parser') infodes = [_details.text.replace('\n', '') for _details in _details_result.find_all('p', {'class': 'type'})] infodes = [_det if ':' not in _det else (_det.split(': ')[-1]) for _det in infodes] infodes.append(_details_result.find('a', {'class': 'active'}).text.split('-')[-1]) self.title = _title_name self.alt_title = infodes[5] self.genres = infodes[2] self.media = infodes[0] self.status = infodes[4] self.year_aired = infodes[3] self.episodes = int(infodes[6]) self.synopsis = infodes[1] else: _anime_details = self.client.get_anime_details( self._anime_search_result[int(self._anime_search_result_string.index(_anime_selected))].id) self.title = _anime_details.title self.alt_title = _anime_details.alternative_titles.en try: self.year_aired = _anime_details.start_season.year except AttributeError: self.year_aired = '?' 
self.year_aired = _anime_details.start_season.year self.score = _anime_details.mean self.media = _anime_details.media_type self.status = _anime_details.status self.genres = ", ".join( [_anime_details.genres[gen_index].name for gen_index in range(0, len(_anime_details.genres))]) self.episodes = int(_anime_details.num_episodes) self.synopsis = _anime_details.synopsis try: self.my_list = _anime_details.my_list_status except AttributeError: self.my_list = '-' try: print(tabulate([['Title', self.title], ['Alternative Title', self.alt_title], ['Score', self.score], ['Genre', self.genres], ['Type', str(self.media).upper()], ['Status', (str(self.status).replace("_", ' ')).title()], ['Released', self.year_aired], ['Episodes', self.episodes]], tablefmt='orgtbl'), '\n\n' + fill(self.synopsis, 80), '\n') except AttributeError: print('[!] Error, incomplete data. ') if self.query_title is None: rep = {" ": "-", ":": "", "’": "-", "?": "", "!": "", ".": "", "/": "-", '★': '', '%': '', '+': '', '=': '', '³': '-'} rep = dict((escape(k), v) for k, v in rep.items()) pattern = compile("|".join(rep.keys())) self.query_title = pattern.sub(lambda m: rep[escape(m.group(0))], self.title.casefold()) self.file_title = sub('[^A-Za-z0-9-,! ]+', '', self.title) download_confirmation = (inquirer.prompt( [inquirer.List('Selected', message="Proceed to download??", choices=['Cancel', 'All', 'Single'])]))[ 'Selected'] if download_confirmation == 'Single': ahf.downloader(mode='Single') elif download_confirmation == 'Cancel': ahf.aninfo() elif download_confirmation == 'All': ahf.downloader(mode='All') if __name__ == '__main__': ahf = ahframework() def mainmenu(): print( '=' * 55 + '\n _ _ _ _ \n| | | |___ ___| |_ AnimeHub Framework by Neek0tine\n| | | | -_| -_| . ' '| Version 0.1\n|_____|___|___|___| https://github.com/neek0tine\n' + '=' * 55) main_selection = \ (inquirer.prompt([inquirer.List('Main Menu', message="What to do?", choices=['Search Anime', 'Update ongoing series', 'Update MAL data',
''' @author: <NAME> (jakpra) @copyright: Copyright 2020, <NAME> @license: Apache 2.0 ''' import sys import math from operator import itemgetter from collections import OrderedDict, Counter import time import random import torch import torch.nn.functional as F import torch.optim as optim from .oracle.oracle import make_unordered_valid_loss # import ExAssist as EA UNK = '<UNKNOWN>' PAD = '<PADDING>' START = '<START>' END = '<END>' SEP = '<SEP>' def create_emb_layer(weights_matrix, trainable=True): num_embeddings, embedding_dim = weights_matrix.size() emb_layer = torch.nn.Embedding(num_embeddings, embedding_dim) emb_layer.load_state_dict({'weight': weights_matrix}) if not trainable: emb_layer.weight.requires_grad = False return emb_layer, num_embeddings, embedding_dim OPTIM = {'sgd': lambda params, **kwargs: optim.SGD(params, lr=kwargs['lr'], momentum=kwargs['momentum']), 'adam': lambda params, **kwargs: optim.Adam(params, lr=kwargs['lr'], eps=kwargs['eps'], weight_decay=kwargs['weight_decay']), 'adamw': lambda params, **kwargs: optim.AdamW(params, lr=kwargs['lr'], eps=kwargs['eps'], weight_decay=kwargs['weight_decay']), 'adagrad': optim.Adagrad} def load_model_states(state_dict): # state_dict = torch.load(filename) # create new OrderedDict that does not contain `module.` new_state_dict = OrderedDict() for k, v in state_dict.items(): if 'O_T' in k: # otherwise the shared trained parameters will be overwritten with untrained ones? 
continue if k.startswith('module.'): new_state_dict[k[7:]] = v else: new_state_dict[k] = v # checkpoint = torch.load(args.model, map_location=device) # net.load_state_dict(new_state_dict) return new_state_dict def compute_acc_and_loss(task, gen, y, y_hat, address_mask, word_mask, criterion, mapped_criterion, batch_size, seq_len, address_dim, output_dim, loss_fxn=False, oracle=None, omega_native_atom=0., omega_atom=0., omega_full=0., lambda_enc=0.1, lambda_dec=0., enc_attn=None, dec_attn=None, deps=None, dep_attn_criterion=None, dep_norm=lambda t: F.normalize(t, p=1, dim=1), output_correct_bool=False): y = y.to(y_hat).long() # print('y', y.size(), y[0, 3], [(i+1, gen.ix_to_out[j.item()]) for i, j in enumerate(y[0, 3])], file=sys.stderr) word_mask = word_mask.to(address_mask) words = word_mask.float().sum() y_hat = y_hat.view(batch_size, -1, address_dim, output_dim) address_mask = address_mask.view(batch_size, -1, address_dim) y_hat_len = y_hat.size(1) if y_hat_len < seq_len: y_hat = torch.cat([y_hat, torch.zeros(batch_size, seq_len - y_hat_len, address_dim, output_dim).to(y_hat)], dim=1) address_mask = torch.cat([address_mask, torch.zeros(batch_size, seq_len - y_hat_len, address_dim).to(address_mask)], dim=1) elif y_hat_len > seq_len: y_hat = y_hat[:, :seq_len] address_mask = address_mask[:, :seq_len] argmaxes = torch.argmax(y_hat, dim=3) categories_gold = gen.extract_outputs(y.view(batch_size, seq_len, address_dim)) categories_hat = gen.extract_outputs(argmaxes) if loss_fxn in ('avg', 'all'): # print('compute dynamic loss') loss = criterion[0](y_hat, categories_hat if oracle is None else gen.extract_outputs(oracle), categories_gold) / words else: # TODO: change SEP to PAD in y, so that loss ignores it y_hat = y_hat.reshape(-1, output_dim) address_mask = address_mask.reshape(batch_size, -1, address_dim) y = y.view(-1) # native loss # y = y.view(-1) # y_hat = y_hat.transpose(1, -1).reshape(batch_size, output_dim, -1) # sum everything, then normalize over batch and 
sequence, but not over addresses # print(criterion) # print(y_hat.shape, y.shape) native_loss = criterion[0](y_hat, y) / words # (batch_size * words) # y.view(-1) # average over everything (incl addresses) native_atomic_loss = criterion[1](y_hat, y) # y.view(-1) # category-level loss # category_loss = atomic_loss / address_dim # TODO: check which one of these is really correct # # category-level loss # y = y.view(-1, address_dim) # # y_hat = y_hat.view(-1, output_dim, address_dim) # y_hat = y_hat.view(-1, address_dim, output_dim).transpose(1, 2) # category_loss = criterion(y_hat, y) / (batch_size * seq_len) if hasattr(gen, 'address_map'): address_mask = address_mask.view(batch_size, -1, address_dim) mask = (~word_mask).unsqueeze(-1).expand(-1, -1, address_dim) | (~address_mask) argmaxes[mask] = gen.out_to_ix[PAD] atomic_output_dim = gen.address_map.output_dim atomic_address_dim = gen.address_map.address_dim mapped_y = gen.address_map(y.view(-1, address_dim), indices=True, argmax=True) # print('mapped_y', mapped_y.size(), mapped_y.view(batch_size, seq_len, atomic_address_dim)[0, :, :6], file=sys.stderr) # exit(0) # print('mapped_y', mapped_y.size(), mapped_y[3, :6], [(i+1, gen.address_map.ix_to_out[j.item()]) for i, j in enumerate(mapped_y[3])], file=sys.stderr) # print('y_hat', y_hat.size(), y_hat[0, 3, :6], file=sys.stderr) mapped_y_hat = gen.address_map(y_hat.view(-1, address_dim, output_dim), norm=True) # print('mapped_y_hat', mapped_y_hat.size(), mapped_y_hat[3, :6], file=sys.stderr) if loss_fxn not in ('avg', 'all'): full_loss = mapped_criterion[0](mapped_y_hat.view(-1, atomic_output_dim), mapped_y.view(-1)) / words atomic_loss = mapped_criterion[1](mapped_y_hat.view(-1, atomic_output_dim), mapped_y.view(-1)) # full_loss = criterion(mapped_y_hat.view(-1, atomic_address_dim, atomic_output_dim).transpose(1, 2), mapped_y.view(-1, atomic_address_dim)) / (batch_size * seq_len * atomic_address_dim) # mask = mask.view(-1, atomic_address_dim) # print('mask', 
mask.size(), file=sys.stderr) # print('argmaxes', argmaxes.size(), argmaxes[0, 3, :6], file=sys.stderr) mapped_argmaxes = gen.address_map(argmaxes.view(-1, address_dim), indices=True, argmax=True).view(batch_size, -1, atomic_address_dim) # print('mapped_argmaxes', mapped_argmaxes.size(), mapped_argmaxes[0, :, :6], file=sys.stderr) correct_bool = torch.all(torch.eq(mapped_argmaxes, mapped_y.view(batch_size, -1, atomic_address_dim)), dim=2) else: full_loss = atomic_loss = 0. argmaxes = argmaxes.view(batch_size, -1) address_mask = address_mask.view(batch_size, -1) argmaxes[~address_mask] = gen.out_to_ix[PAD] y_hat_seps = (argmaxes == gen.out_to_ix[SEP]).nonzero() # indices of separators in pred: [[b0, s0], [b1, s1], ...] y = y.view(batch_size, -1) y_seps = (y == gen.out_to_ix[SEP]).nonzero() # indices of separators in gold max_words = word_mask.size(1) correct_bool = torch.zeros(batch_size, max_words, dtype=torch.bool).to(word_mask) # correct_bool = torch.eq(argmaxes, y.view(batch_size, -1, address_dim)) last_batch = 0 last_y_hat_sep = 0 last_y_sep = 0 i = 0 y_hat_seps = iter(y_hat_seps) try: for yb, ys in y_seps: yb, ys = yb.item(), ys.item() if yb != last_batch: last_y_sep = 0 i = 0 if i >= max_words: continue try: yhb, yhs = next(y_hat_seps) yhb, yhs = yhb.item(), yhs.item() while yhb != yb: yhb, yhs = next(y_hat_seps) yhb, yhs = yhb.item(), yhs.item() except StopIteration: correct_bool[yb, i] = False else: correct_bool[yb, i] = yhs-last_y_hat_sep == ys-last_y_sep and torch.all(torch.eq(argmaxes[yhb, last_y_hat_sep:yhs], y[yb, last_y_sep:ys])) last_y_hat_sep = yhs last_batch, last_y_sep, i = yb, ys, i+1 except ValueError as e: raise ValueError(*e.args, y_hat_seps, y_seps) except IndexError as e: raise IndexError(*e.args, f'yb={yb}, last_batch={last_batch}, ys={ys}, last_y_sep={last_y_sep}, i={i}') category_acc = (correct_bool & word_mask).float().sum() / words has_enc_attn = enc_attn is not None has_dec_attn = dec_attn is not None if loss_fxn not in ('avg', 'all'): 
loss = (1. - omega_native_atom - omega_atom - omega_full) * native_loss + \ omega_native_atom * native_atomic_loss + \ omega_atom * atomic_loss + \ omega_full * full_loss lbda = 1. - int(has_enc_attn) * lambda_enc - int(has_dec_attn) * lambda_dec loss = loss.clone() * lbda if deps is None: # loss += torch.sum(torch.abs(dec_attn)) / (batch_size * seq_len * address_dim) pass else: if has_dec_attn: dec_deps = torch.diagflat(torch.ones(seq_len * address_dim, dtype=torch.float32) ).view(seq_len, address_dim, seq_len, address_dim) dec_deps = dec_deps.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1) if has_enc_attn: enc_deps = torch.diagflat(torch.ones(seq_len, dtype=torch.float32)) enc_deps = enc_deps.unsqueeze(0).unsqueeze(2).repeat(batch_size, 1, address_dim, 1) for n, seq in enumerate(deps): for i, args in enumerate(seq): if not word_mask[n, i].item(): break for a, j, b in args: if not address_mask[n, i, a].item(): continue d_a = math.floor(math.log2(a+1)) p_a = (a+1) // 2 - 1 if has_enc_attn: enc_deps[n, i, a, i] = 0. enc_deps[n, i, a, j] += 1. # parent slash enc_deps[n, i, p_a, i] = 0. enc_deps[n, i, p_a, j] += 1. # children and descendents for log_breadth, depth in enumerate(range(d_a+1, gen.max_depth), start=1): first_addr = 2 ** depth - 1 any_at_depth = False for c_a in range(first_addr, first_addr+2**log_breadth): if address_mask[n, i, c_a].item(): any_at_depth = True enc_deps[n, i, c_a, i] = 0. enc_deps[n, i, c_a, j] += 1. if not any_at_depth: break # TODO: not sure about this one # enc_deps[j, n, j, b] = 0. # enc_deps[i, n, j, b] += 1. if has_dec_attn: d_b = math.floor(math.log2(b+1)) if d_b < d_a: # head's attn to deps (note that key of attn has to be in first dim for KLLoss) # (key_token, key_addr, batch, query_token, query_addr) dec_deps[n, i, a, i, a] = 0. dec_deps[n, i, a, j, b] = 1. 
elif d_a < d_b: # dep's attn to heads (note that key of attn has to be in first dim for KLLoss) # (key_token, key_addr, batch, query_token, query_addr) dec_deps[n, j, b, j, b] = 0. dec_deps[n, j, b, i, a] = 1. if has_dec_attn: dec_deps = dec_deps.view(-1, seq_len*address_dim).to(dec_attn) # total_batch_size, self.address_dim, seq_len, self.address_dim dec_attn = dec_attn.view(-1, seq_len*address_dim) # .permute(2, 3, 0, 1).reshape dec_attn_loss = dep_attn_criterion(F.log_softmax(dec_attn, dim=1), dep_norm(dec_deps)) loss += lambda_dec * dec_attn_loss del dec_attn, dec_deps if has_enc_attn: enc_deps = enc_deps.view(-1, seq_len).to(enc_attn) # total_batch_size, self.address_dim, seq_len enc_attn = enc_attn.view(-1, seq_len) # .permute(2, 0, 1).reshape enc_attn_loss = dep_attn_criterion(F.log_softmax(enc_attn, dim=1), dep_norm(enc_deps)) loss += lambda_enc * enc_attn_loss del enc_attn, enc_deps result = category_acc, loss if output_correct_bool: result = (*result, (argmaxes, correct_bool, categories_hat, categories_gold)) del word_mask, address_mask, y, y_hat return result # TODO: append accs and losses to a file so they don't get overwritten by subsequent runs def train(net, trainloader, optimizer, devloader=None, criterion=torch.nn.CrossEntropyLoss, dep_attn_criterion=torch.nn.KLDivLoss, # optimizer_name='sgd', learning_rate=0.001, momentum=0.9, epochs=1, start_epoch=0, dev_acc=0.0, dev_loss=None, seed=42, loss_fxn='crossent', omega_native_atom=0., omega_atom=0., omega_full=0., lambda_enc=0.1, lambda_dec=0., batch_size=4, max_batches=None, n_print=100, model='ccg-glove', device='cpu', device_ids=[0]): # , device='cpu', device_ids=[0]): random.seed(seed) # device = device # torch.device(f'cuda:{cuda_device}' if cuda.is_available() else 'cpu') # # _optimizer = optimizer if optimizer is not None \ # else OPTIM.get(optimizer_name, optim.SGD)(net.parameters(), lr=learning_rate, momentum=momentum) torch.autograd.set_detect_anomaly(True) # atomic_criterion = 
criterion(ignore_index=net.out_to_ix[PAD], reduction='sum') # category_criterion = criterion(ignore_index=net.out_to_ix[PAD], reduction='sum') criteria = [] mapped_criteria = [] dep_attn_criteria = [] for gen in net.generators: # weight = torch.ones(gen.output_dim, dtype=torch.float32) # weight.index_fill_(0, gen.grow_label_ix, 2.) if loss_fxn in ('avg', 'all'): # print('instantiate dynamic loss') criteria.append((make_unordered_valid_loss(gen.out_to_ix, fxn=loss_fxn), None)) else: criteria.append((criterion(ignore_index=gen.out_to_ix[PAD], reduction='sum'),
#!/usr/bin/env python ## Fraunhofer IIs ## <NAME> ## Mostly base on Spawn_npc.py from tutorial ### Carla Traffic Manager ### Vehicles selfcontrol import glob import os import sys import time import pandas as pd try: sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( sys.version_info.major, sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) except IndexError: pass import carla import argparse import logging import random import copy def main(): argparser = argparse.ArgumentParser( description=__doc__) argparser.add_argument( '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( '-n', '--number-of-vehicles', metavar='N', default=10, type=int, help='number of vehicles (default: 10)') argparser.add_argument( '-w', '--number-of-walkers', metavar='W', default=50, type=int, help='number of walkers (default: 50)') argparser.add_argument( '--assigned', default=True, type=bool, help='one Vehicle assigned, audi.tt' ) argparser.add_argument( '--safe', action='store_true', help='avoid spawning vehicles prone to accidents') argparser.add_argument( '--res', metavar='WIDTHxHEIGHT', default='1280x720', help="window resolution (default: 1280x720)") argparser.add_argument( '--filterv', metavar='PATTERN', default='vehicle.*', help='vehicles filter (default: "vehicle.*")') argparser.add_argument( '--filterw', metavar='PATTERN', default='walker.pedestrian.*', help='pedestrians filter (default: "walker.pedestrian.*")') argparser.add_argument( '-tm_p', '--tm_port', metavar='P', default=8000, type=int, help='port to communicate with TM (default: 8000)') argparser.add_argument( '--sync', action='store_true', help='Synchronous mode execution') argparser.add_argument( '-m', '--map_name', default='Town01', type=str, help='map name to load in the server (default: 
Town01)') args = argparser.parse_args() args.width, args.height = [int(x) for x in args.res.split('x')] logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) vehicles_list = [] walkers_list = [] all_id = [] client = carla.Client(args.host, args.port) client.load_world(map_name=args.map_name) client.set_timeout(10.0) ## Datasets path path_dataset = os.getcwd() + "/Datasets/" dataset_name = "audi_tt.csv" path_dataset = path_dataset + dataset_name if os.path.isfile(path_dataset): newfile = False else: newfile = True try: traffic_manager = client.get_trafficmanager(args.tm_port) ## tm set the global distance to other vehicles traffic_manager.set_global_distance_to_leading_vehicle(4.0) ## tm set the hybrid mode world = client.get_world() synchronous_master = False if args.sync: ## synchronous mode settings = world.get_settings() traffic_manager.set_synchronous_mode(True) if not settings.synchronous_mode: synchronous_master = True settings.synchronous_mode = True settings.fixed_delta_seconds = 0.05 ## get a fixed time-step in between frames (0.05 sec) = 20 world.apply_settings(settings) else: synchronous_master = False blueprints = world.get_blueprint_library().filter(args.filterv) blueprintsWalkers = world.get_blueprint_library().filter(args.filterw) ## get traffic lights and set them to green # actor_trafficlights = world.get_actors().filter('traffic.traffic_light*') # for light in actor_trafficlights: # light.set_state(carla.TrafficLightState.Green) # print(actor_trafficlights[0]) if args.safe: blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4] blueprints = [x for x in blueprints if not x.id.endswith('isetta')] blueprints = [x for x in blueprints if not x.id.endswith('carlacola')] blueprints = [x for x in blueprints if not x.id.endswith('cybertruck')] blueprints = [x for x in blueprints if not x.id.endswith('t2')] spawn_points = world.get_map().get_spawn_points() number_of_spawn_points = len(spawn_points) if 
args.number_of_vehicles < number_of_spawn_points: random.shuffle(spawn_points) elif args.number_of_vehicles > number_of_spawn_points: msg = 'requested %d vehicles, but could only find %d spawn points' logging.warning(msg, args.number_of_vehicles, number_of_spawn_points) args.number_of_vehicles = number_of_spawn_points # @todo cannot import these directly. SpawnActor = carla.command.SpawnActor SetAutopilot = carla.command.SetAutopilot FutureActor = carla.command.FutureActor # -------------- # Spawn vehicles # -------------- batch = [] blueprint_audi = None ## for audi tt blueprint_toyota = None ## for toyota prius 3 Cars vehicle_actors = [] if args.assigned: ### assign special car, here is audi if blueprint_audi is None: blueprint_audi = world.get_blueprint_library().filter('vehicle.audi.tt') ## actorblueprint, transform, rotation actorblueprint_audi = blueprint_audi[0] if blueprint_toyota is None: blueprint_toyota = world.get_blueprint_library().filter('vehicle.toyota.prius') ## actorblueprint, transform, rotation blueprint_toyota1 = world.get_blueprint_library().filter('vehicle.toyota.prius') blueprint_toyota2 = world.get_blueprint_library().filter('vehicle.toyota.prius') bp_toyota_list = [blueprint_toyota, blueprint_toyota1, blueprint_toyota2] actor_toyota_list = [blueprint_toyota[0], blueprint_toyota1[0], blueprint_toyota2[0]] for n, transform in enumerate(spawn_points): if n >= args.number_of_vehicles - 2: break blueprint = random.choice(blueprints) if blueprint.has_attribute('color'): color = random.choice(blueprint.get_attribute('color').recommended_values) blueprint.set_attribute('color', color) if blueprint.has_attribute('driver_id'): driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values) blueprint.set_attribute('driver_id', driver_id) if n == 0 and blueprint_audi is not None: ## the attribute setting for audi # actorblueprint_audi.set_attribute('role_name', 'autopilot') # set the autopilot 
actorblueprint_audi.set_attribute('role_name', 'hero') # set the autopilot actorblueprint_audi.set_attribute('color', '255,0,0') # set the color of the audi ## set the starting point on the highway transform.location.x = -16.6 transform.location.y = -87.56 ## -146.11 transform.location.z = 0.2819 ## 1.19 transform.rotation.yaw = 90 transform.rotation.pitch = 0 transform.rotation.roll = 0 print(transform) # print(type(blueprint_audi)) # actor_audi = SpawnActor(actorblueprint_audi, transform).then(SetAutopilot(FutureActor, True)) actor_audi = world.spawn_actor(actorblueprint_audi, transform) print(actor_audi) vehicle_actors.append(actor_audi) traffic_manager.distance_to_leading_vehicle(actor_audi, 2.0) ## set the minimum distance in meters to keep with the others traffic_manager.vehicle_percentage_speed_difference(actor_audi, 30.0) actor_audi.set_autopilot() traffic_manager.auto_lane_change(actor_audi, False) ### not change the lane (force to ride on highway)? traffic_manager.ignore_vehicles_percentage(actor_audi, 0) traffic_manager.ignore_lights_percentage(actor_audi, 100) elif n == 1 and bp_toyota_list is not None: ## the attribute setting for toyota actor_toyota_list[0].set_attribute('role_name', 'autopilot') # set the autopilot ## hero? actor_toyota_list[1].set_attribute('role_name', 'autopilot') # set the autopilot ## hero? actor_toyota_list[2].set_attribute('role_name', 'autopilot') # set the autopilot ## hero? 
actor_toyota_list[0].set_attribute('color', '0,255,0') # set the color of the toyota actor_toyota_list[1].set_attribute('color', '0,255,255') # set the color of the toyota actor_toyota_list[2].set_attribute('color', '255,255,255') # set the color of the toyota ## set the starting point on the highway transform.location.x = -5.9 transform.location.y = -39.15 ## -146.11 transform.rotation.yaw = 90 transform.rotation.pitch = 0 transform.rotation.roll = 0 print(transform) actor_toyota = world.spawn_actor(actor_toyota_list[0], transform) transform.location.x = -9.25 print(transform) actor_toyota1 = world.spawn_actor(actor_toyota_list[1], transform) transform.location.x = -13.03 print(transform) actor_toyota2 = world.spawn_actor(actor_toyota_list[2], transform) vehicle_actors.append(actor_toyota) vehicle_actors.append(actor_toyota1) vehicle_actors.append(actor_toyota2) traffic_manager.distance_to_leading_vehicle(actor_toyota, 2.0) ## set the minimum distance in meters to keep with the others traffic_manager.distance_to_leading_vehicle(actor_toyota1, 4.0) ## set the minimum distance in meters to keep with the others traffic_manager.distance_to_leading_vehicle(actor_toyota2, 6.0) ## set the minimum distance in meters to keep with the others traffic_manager.vehicle_percentage_speed_difference(actor_toyota, -10.0) traffic_manager.vehicle_percentage_speed_difference(actor_toyota1, -10.0) traffic_manager.vehicle_percentage_speed_difference(actor_toyota2, -10.0) traffic_manager.auto_lane_change(actor_toyota, False) ### not change the lane (force to ride on highway)? traffic_manager.auto_lane_change(actor_toyota1, False) ### not change the lane (force to ride on highway)? traffic_manager.auto_lane_change(actor_toyota2, False) ### not change the lane (force to ride on highway)? 
actor_toyota1.set_autopilot() actor_toyota.set_autopilot() actor_toyota2.set_autopilot() traffic_manager.ignore_lights_percentage(actor_toyota, 100) traffic_manager.ignore_lights_percentage(actor_toyota1, 100) traffic_manager.ignore_lights_percentage(actor_toyota2, 100) traffic_manager.ignore_vehicles_percentage(actor_toyota, 70) traffic_manager.ignore_vehicles_percentage(actor_toyota1, 50) traffic_manager.ignore_vehicles_percentage(actor_toyota2, 30) else: # mul = 3.5 blueprint.set_attribute('role_name', 'autopilot') # if n < 5: ## for left lanes # transform.location.x = 4.6115 + (n-1) * mul # transform.location.y = -87.56 # transform.location.z = 0.2819 # transform.rotation.yaw = -90 # transform.rotation.pitch = 0 # transform.rotation.roll = 0 # print(transform) # actor = world.spawn_actor(blueprint, transform) # actor.set_autopilot() # traffic_manager.auto_lane_change(actor, False) ### not change the lane (force to ride on highway)? # # if n >= 5 and n < 8: ## for right lanes # transform.location.x = -5.90521 - (n - 5) * mul # transform.location.y = -87.56 # transform.location.z = 0.2819 # transform.rotation.yaw = 90 # transform.rotation.pitch = 0 # transform.rotation.roll = 0 # print(transform) # actor = world.spawn_actor(blueprint, transform) # actor.set_autopilot() # traffic_manager.auto_lane_change(actor, False) ### not change the lane (force to ride on highway)? batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True))) # vehicle_actors.append(world.spawn_actor(blueprint, transform)) # print(dir(traffic_manager)) for response in client.apply_batch_sync(batch, synchronous_master): if response.error: logging.error(response.error) else: vehicles_list.append(response.actor_id) # ------------- # Spawn Walkers # ------------- # some settings percentagePedestriansRunning = 0.0 # how many pedestrians will run percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road # 1. 
take all the random locations to spawn spawn_points = [] for i in range(args.number_of_walkers): spawn_point = carla.Transform() loc = world.get_random_location_from_navigation() if (loc != None): spawn_point.location = loc spawn_points.append(spawn_point) # 2. we spawn the walker object batch = [] walker_speed = [] for spawn_point in spawn_points: walker_bp = random.choice(blueprintsWalkers) # set as not invincible if walker_bp.has_attribute('is_invincible'): walker_bp.set_attribute('is_invincible', 'false') # set the max speed if walker_bp.has_attribute('speed'): if (random.random() > percentagePedestriansRunning): # walking walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1]) else: # running walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2]) else: print("Walker has no speed") walker_speed.append(0.0) batch.append(SpawnActor(walker_bp, spawn_point)) results = client.apply_batch_sync(batch, True) walker_speed2 = [] for i in range(len(results)): if results[i].error: logging.error(results[i].error) else: walkers_list.append({"id": results[i].actor_id}) walker_speed2.append(walker_speed[i]) walker_speed = walker_speed2 # 3. we spawn the walker controller batch = [] walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker') for i in range(len(walkers_list)): batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"])) results = client.apply_batch_sync(batch, True) for i in range(len(results)): if results[i].error: logging.error(results[i].error) else: walkers_list[i]["con"] = results[i].actor_id # 4. 
we put altogether the walkers and controllers id to get the objects from their id for i in range(len(walkers_list)): all_id.append(walkers_list[i]["con"]) all_id.append(walkers_list[i]["id"]) all_actors = world.get_actors(all_id) # wait for a tick to ensure client receives the last transform of the walkers we have just created if not args.sync or not synchronous_master: world.wait_for_tick() else: world.tick() # 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...]) # set how many pedestrians can cross the road world.set_pedestrians_cross_factor(percentagePedestriansCrossing) for i in range(0, len(all_id), 2): # start walker all_actors[i].start() # set walk to random point all_actors[i].go_to_location(world.get_random_location_from_navigation()) # max speed all_actors[i].set_max_speed(float(walker_speed[int(i / 2)])) print('spawned
target_vertex, parent_dict): '''Used to Return shortest path between two vertices, used in bfs_shortestpath_notree''' path = [target_vertex] parent = parent_dict[target_vertex] #get path, composed of vertices while parent != source_vertex: path.insert(0,parent) parent = parent_dict[parent] path.insert(0,source_vertex) #still missing the source vertex (it has no parent) return path ########################################################## ############### UTILS ############ ########################################################## def room_has_goals_of_type_of_box(self,room_id,box_id): #goal id's of goals in room for goal_id in self.goals_per_room[room_id]: if self.state.goal_types[goal_id] == self.state.box_types[box_id]: return True return False def room_has_agents_of_color_of_box(self,room_id,box_id): self.get_agent_distribution_per_room() for agent_id in self.agents_per_room[room_id]: if self.state.agent_colors[agent_id] == self.state.box_colors[box_id]: return True return False def corridor_vertices_of_room(self,room): '''Returns all vertices in a room which are part of corridors''' corridor_vertices = {vertex for vertex in room if self.is_corridor_candidate(vertex)} return corridor_vertices def corridor_vertex_condition(self,vertex): '''neighbours are quicly acesssible among them without having to go through vertex''' neighbours = deque(self.get_neighbours(vertex)) if neighbours: neighbour = neighbours.pop() neighbours_shortest_paths = [self.bfs_shortestpath_notree(neighbour,n,cutoff_branch=20,illegal_vertices={vertex}) for n in neighbours] if None in neighbours_shortest_paths: return True #if it is accessible but takes more than 3 steps (path includes initial and final vertex) for path in neighbours_shortest_paths: if len(path) > 5: return True return False def is_corridor_candidate(self,vertex): neighbours = [n for n in self.get_neighbours(vertex)] if len(neighbours) == 1: #3 walls around him return self.corridor_vertex_condition(neighbours[0]) or False 
elif self.corridor_vertex_condition(vertex): return True else: return False def is_connected_component(self,container_of_vertices: set, particular_vertices_are_all_in_a_component= None, vertices = None): """ True if the container is a connected component For special cases you can set particular_vertices_are_all_in_a_component to True, If the container_of_vertices is not fully connected but one of its individual components holds all of the vertices in the vertices argument, will return True """ if particular_vertices_are_all_in_a_component: assert vertices, "Provide the vertices that should all be located in the same component" connected_components = self.locate_separate_connected_components(container_of_vertices,store_tree=False) #so now that we have all the connected components, we will check if any of them holds all vertices desired for cc in connected_components: cc_holds_vertices = 0 for vertex in vertices: if vertex not in cc: break else: cc_holds_vertices += 1 # if a component holds only a part of the vertices if 0 < cc_holds_vertices < len(vertices): return False # if the component holds all vertices elif cc_holds_vertices == len(vertices): return True else: vertex = container_of_vertices.pop() container_of_vertices.add(vertex) tree = self.bfs_tree(vertex) return len(container_of_vertices) != len(tree.keys()) def locate_separate_connected_components(self,container_of_vertices: set,store_tree = True): '''Detect connected components of container of vertices''' initial_vertex = container_of_vertices.pop() container_of_vertices.add(initial_vertex) if store_tree: tree = self.bfs_tree(initial_vertex) else: tree = self.bfs_tree_no_store(initial_vertex) connected_components = [] #initialize first room initial_room = {(i,j) for i,j in tree.keys()} connected_components.append(initial_room) #while we haven't accounted for all vertices to be in their room while self.sum_len_elements_of_list(connected_components) != len(container_of_vertices): vertex_not_in_initial_room 
= self.from_not_in(container_of_vertices,connected_components) if store_tree: tree = self.bfs_tree(initial_vertex) else: tree = self.bfs_tree_no_store(initial_vertex) new_connected_component = {(i,j) for i,j in tree.keys()} connected_components.append(new_room) new_connected_component = {} return connected_components def vertex_is_easily_accessible(self,vertex,walls_container): assert vertex neighbours = self.get_neighbours(vertex) neighbours_not_in_wall_container = {n for n in neighbours if n not in walls_container} return len(neighbours_not_in_wall_container) > 0 def delete_useless_elements(self,rooms:list = None,agents:list = None,boxes:list = None): """Removes useless room information""" if rooms: for room_index in rooms: del self.rooms[room_index] # we are sure the room exists self.agents_per_room.pop(room_index)# not that is has agents or boxes self.boxes_per_room.pop(room_index) if agents: for agent_index in agents: del self.agents_per_room[room_index] def union_of_sets(self,list_of_sets: list): assert list_of_sets set_union = set() number_of_sets = len(list_of_sets) for set_index in range(number_of_sets): set_union = set_union.union(list_of_sets[set_index]) return set_union def break_container_into_adjacent_vertices(self,container): '''Goes through vertices in container and returns list of deques with vertices grouped with adjacent vertices''' list_of_deques = [] while container: current_vertex = container.pop() adjacent_to_current_vertex = self.find_adjacent_vertices_in_container(current_vertex,container) adjacent_to_current_vertex.update([current_vertex]) #we'll say the current vertex is adjacent to himself because we use this for determining corridors container.difference_update(adjacent_to_current_vertex) list_of_deques.append(adjacent_to_current_vertex) return list_of_deques def find_adjacent_vertices_in_container(self,vertex,container): '''Find adjacent vertices of adjacent vertices and so on for a particular vertex in a container''' #assert container 
, "container is empty" explored_set = set([vertex]) adjacent_to_vertex = {v for v in container if u.are_adjacent(vertex,v)} non_explored = {v for v in adjacent_to_vertex if v not in explored_set} while non_explored: vertex = non_explored.pop() explored_set.add(vertex) adjacent_to_an_adjacent = {v for v in container if u.are_adjacent(vertex,v)} adjacent_to_vertex.update(adjacent_to_an_adjacent) non_explored = {v for v in adjacent_to_vertex if v not in explored_set} return adjacent_to_vertex def container_is_composed_of_adjacent_vertices(self,container): '''True if container is composed of only adjacent vertices, same as previous function but used to check condition instead,''' # TODO: merge it into previous one as argument assert container , "container is empty" vertex = container.pop() container.add(vertex) explored_set = set([vertex]) adjacent_to_vertex = {v for v in container if u.are_adjacent(vertex,v)} non_explored = {v for v in adjacent_to_vertex if v not in explored_set} while non_explored: vertex = non_explored.pop() explored_set.add(vertex) adjacent_to_an_adjacent = {v for v in container if u.are_adjacent(vertex,v)} adjacent_to_vertex.update(adjacent_to_an_adjacent) non_explored = {v for v in adjacent_to_vertex if v not in explored_set} adjacent_to_vertex.add(vertex) #print(len(adjacent_to_vertex) == len(container)) return len(adjacent_to_vertex) == len(container) def is_corner(self,vertex): (x,y) = vertex opt1,opt2,opt3,opt4 = {(x-1,y),(x,y+1)},{(x+1,y),(x,y+1)},{(x+1,y),(x,y-1)},{(x-1,y),(x,y-1)} neighbouring_walls = self.get_neighbours(vertex,in_vertices=False,in_walls=True) if neighbouring_walls == opt1 or neighbouring_walls == opt2 or neighbouring_walls == opt3 or neighbouring_walls == opt4: return True else: return False def is_corridor_corner(self,vertex): (x,y) = vertex opt1,opt2,opt3,opt4 = {(x-1,y),(x,y+1),(x+1,y-1)},{(x+1,y),(x,y+1),(x-1,y-1)},{(x+1,y),(x,y-1),(x-1,y+1)},{(x-1,y),(x,y-1),(x+1,y+1)} neighbouring_walls = 
self.get_neighbours(vertex,in_vertices=False,in_walls=True) if opt1.issubset(self.walls) or opt2.issubset(self.walls) or opt3.issubset(self.walls) or opt4.issubset(self.walls): return True else: return False def get_neighbours(self,vertex,in_vertices = True, in_walls = False, in_container = None, not_in_container = None): '''Returns neighbours, returns none if there are no neighbours''' assert vertex in self.vertices (x,y) = vertex neighbours = {(x,y+1),(x,y-1),(x-1,y),(x+1,y)} if in_vertices: neighbours = {n for n in neighbours if n in self.vertices} if in_walls: neighbours = {n for n in neighbours if n in self.walls} if in_container: neighbours = {n for n in neighbours if n in in_container} if not_in_container: neighbours = {n for n in neighbours if n not in not_in_container} return neighbours def get_specific_neighbours(self,vertex,in_vertices = True, in_walls = False, S_ = None,N_ = None, W_ = None, E_ = None): '''Returns neighbours, returns none if there are no neighbours''' assert vertex in self.vertices (x,y) = vertex N,S,W,E = (x,y+1),(x,y-1),(x-1,y),(x+1,y) neighbours = set() if S_: neighbours.add(S) if N_: neighbours.add(N) if W_: neighbours.add(W) if E_: neighbours.add(E) if in_vertices: neighbours = {n for n in neighbours if n in self.vertices} if in_walls: neighbours = {n for n in neighbours if n in self.walls} return neighbours def get_neighbours_2coordinates_away(self,vertex): assert vertex in self.vertices (x,y) = vertex neighbours = {(x,y+2),(x,y-2),(x-2,y),(x+2,y)} neighbours = {n for n in neighbours if n in self.vertices} #assert len(neighbours) == 1 #only for depression edge case return neighbours def number_neighbouring_walls_of_vertex(self,vertex): '''Returns amount of neighbours of partical vertex that are walls (min:0 ; max:4) ''' neighbours = self.get_neighbours(vertex, in_vertices = None) n_neighbouring_walls = 0 neighbours_in_walls = {n for n in neighbours if n in self.walls} n_neighbouring_walls += len(neighbours_in_walls) assert 
n_neighbouring_walls >= 0 and n_neighbouring_walls <= 4, "Neighbouring walls must be between 0 and 4" return n_neighbouring_walls def sum_len_elements_of_list (self,list_): lenght = 0 for element in list_: lenght += len(element) return lenght def from_not_in (self, from_container, not_in_containers): '''Get an element in from_container that isn't in any of the not_in_containers, Returns None if not possibe to do so''' explored_elements = set() for element in from_container: for not_in_container in not_in_containers: if element in not_in_container: explored_elements.add(element) break if element not in explored_elements: return element if len(explored_elements) == len(from_container): return None def direction_between_two_adjacent_vertices(self,_from,to): subtract = tuple(np.subtract(_from,to)) if subtract == (1,0): direction = "S" elif subtract == (-1,0): direction = "N" elif subtract == (0,1): direction = "E" elif subtract == (0,-1): direction = "W" else: raise ValueError("Vertices are not adjacent") return (_from,direction) def get_children_dictionary(self,parent_dictionary): '''Turn dictionary in form children:(parent) to parent:(children)''' children = defaultdict(list) for child, parent in parent_dictionary.items(): children[parent].append(child) return children def is_neighbour_of_vertex(self,is_neighbour,of_vertex): return is_neighbour in self.get_neighbours(of_vertex) def is_wall (self,vertex): return vertex in self.walls def are_walls (self,vertices): are_walls = {v for v in vertices if self.is_wall(v)} return are_walls def deep_copy(self,x): return copy.deepcopy(x) #def locate_high_density_areas: #def locate_high_density_clustered_goals_in_corridor: #def locate_complicated_corners: #class Goal_Rooms_Tree: #or just block and see if connection from room to otther rooms is not blocked # in choke points : see if they connect different rooms if __name__ == '__main__': print("STARTED") agt0 = util.agent(0,"red") agt1 = util.agent(1,"blue") box0 = util.box("A", 
"blue") maze
<filename>generated/nidmm/_library.py # -*- coding: utf-8 -*- # This file was generated import ctypes import threading from nidmm._visatype import * # noqa: F403,H303 class Library(object): '''Library Wrapper around driver library. Class will setup the correct ctypes information for every function on first call. ''' def __init__(self, ctypes_library): self._func_lock = threading.Lock() self._library = ctypes_library # We cache the cfunc object from the ctypes.CDLL object self.niDMM_Abort_cfunc = None self.niDMM_ConfigureMeasurementAbsolute_cfunc = None self.niDMM_ConfigureMeasurementDigits_cfunc = None self.niDMM_ConfigureMultiPoint_cfunc = None self.niDMM_ConfigureRTDCustom_cfunc = None self.niDMM_ConfigureRTDType_cfunc = None self.niDMM_ConfigureThermistorCustom_cfunc = None self.niDMM_ConfigureThermocouple_cfunc = None self.niDMM_ConfigureTrigger_cfunc = None self.niDMM_ConfigureWaveformAcquisition_cfunc = None self.niDMM_Disable_cfunc = None self.niDMM_ExportAttributeConfigurationBuffer_cfunc = None self.niDMM_ExportAttributeConfigurationFile_cfunc = None self.niDMM_Fetch_cfunc = None self.niDMM_FetchMultiPoint_cfunc = None self.niDMM_FetchWaveform_cfunc = None self.niDMM_GetAttributeViBoolean_cfunc = None self.niDMM_GetAttributeViInt32_cfunc = None self.niDMM_GetAttributeViReal64_cfunc = None self.niDMM_GetAttributeViString_cfunc = None self.niDMM_GetCalDateAndTime_cfunc = None self.niDMM_GetDevTemp_cfunc = None self.niDMM_GetError_cfunc = None self.niDMM_GetExtCalRecommendedInterval_cfunc = None self.niDMM_GetLastCalTemp_cfunc = None self.niDMM_GetSelfCalSupported_cfunc = None self.niDMM_ImportAttributeConfigurationBuffer_cfunc = None self.niDMM_ImportAttributeConfigurationFile_cfunc = None self.niDMM_InitWithOptions_cfunc = None self.niDMM_Initiate_cfunc = None self.niDMM_LockSession_cfunc = None self.niDMM_PerformOpenCableComp_cfunc = None self.niDMM_PerformShortCableComp_cfunc = None self.niDMM_Read_cfunc = None self.niDMM_ReadMultiPoint_cfunc = None 
self.niDMM_ReadStatus_cfunc = None self.niDMM_ReadWaveform_cfunc = None self.niDMM_ResetWithDefaults_cfunc = None self.niDMM_SelfCal_cfunc = None self.niDMM_SendSoftwareTrigger_cfunc = None self.niDMM_SetAttributeViBoolean_cfunc = None self.niDMM_SetAttributeViInt32_cfunc = None self.niDMM_SetAttributeViReal64_cfunc = None self.niDMM_SetAttributeViString_cfunc = None self.niDMM_UnlockSession_cfunc = None self.niDMM_close_cfunc = None self.niDMM_error_message_cfunc = None self.niDMM_reset_cfunc = None self.niDMM_self_test_cfunc = None def niDMM_Abort(self, vi): # noqa: N802 with self._func_lock: if self.niDMM_Abort_cfunc is None: self.niDMM_Abort_cfunc = self._library.niDMM_Abort self.niDMM_Abort_cfunc.argtypes = [ViSession] # noqa: F405 self.niDMM_Abort_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_Abort_cfunc(vi) def niDMM_ConfigureMeasurementAbsolute(self, vi, measurement_function, range, resolution_absolute): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureMeasurementAbsolute_cfunc is None: self.niDMM_ConfigureMeasurementAbsolute_cfunc = self._library.niDMM_ConfigureMeasurementAbsolute self.niDMM_ConfigureMeasurementAbsolute_cfunc.argtypes = [ViSession, ViInt32, ViReal64, ViReal64] # noqa: F405 self.niDMM_ConfigureMeasurementAbsolute_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureMeasurementAbsolute_cfunc(vi, measurement_function, range, resolution_absolute) def niDMM_ConfigureMeasurementDigits(self, vi, measurement_function, range, resolution_digits): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureMeasurementDigits_cfunc is None: self.niDMM_ConfigureMeasurementDigits_cfunc = self._library.niDMM_ConfigureMeasurementDigits self.niDMM_ConfigureMeasurementDigits_cfunc.argtypes = [ViSession, ViInt32, ViReal64, ViReal64] # noqa: F405 self.niDMM_ConfigureMeasurementDigits_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureMeasurementDigits_cfunc(vi, measurement_function, range, resolution_digits) def 
niDMM_ConfigureMultiPoint(self, vi, trigger_count, sample_count, sample_trigger, sample_interval): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureMultiPoint_cfunc is None: self.niDMM_ConfigureMultiPoint_cfunc = self._library.niDMM_ConfigureMultiPoint self.niDMM_ConfigureMultiPoint_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ViInt32, ViReal64] # noqa: F405 self.niDMM_ConfigureMultiPoint_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureMultiPoint_cfunc(vi, trigger_count, sample_count, sample_trigger, sample_interval) def niDMM_ConfigureRTDCustom(self, vi, rtd_a, rtd_b, rtd_c): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureRTDCustom_cfunc is None: self.niDMM_ConfigureRTDCustom_cfunc = self._library.niDMM_ConfigureRTDCustom self.niDMM_ConfigureRTDCustom_cfunc.argtypes = [ViSession, ViReal64, ViReal64, ViReal64] # noqa: F405 self.niDMM_ConfigureRTDCustom_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureRTDCustom_cfunc(vi, rtd_a, rtd_b, rtd_c) def niDMM_ConfigureRTDType(self, vi, rtd_type, rtd_resistance): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureRTDType_cfunc is None: self.niDMM_ConfigureRTDType_cfunc = self._library.niDMM_ConfigureRTDType self.niDMM_ConfigureRTDType_cfunc.argtypes = [ViSession, ViInt32, ViReal64] # noqa: F405 self.niDMM_ConfigureRTDType_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureRTDType_cfunc(vi, rtd_type, rtd_resistance) def niDMM_ConfigureThermistorCustom(self, vi, thermistor_a, thermistor_b, thermistor_c): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureThermistorCustom_cfunc is None: self.niDMM_ConfigureThermistorCustom_cfunc = self._library.niDMM_ConfigureThermistorCustom self.niDMM_ConfigureThermistorCustom_cfunc.argtypes = [ViSession, ViReal64, ViReal64, ViReal64] # noqa: F405 self.niDMM_ConfigureThermistorCustom_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureThermistorCustom_cfunc(vi, thermistor_a, thermistor_b, 
thermistor_c) def niDMM_ConfigureThermocouple(self, vi, thermocouple_type, reference_junction_type): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureThermocouple_cfunc is None: self.niDMM_ConfigureThermocouple_cfunc = self._library.niDMM_ConfigureThermocouple self.niDMM_ConfigureThermocouple_cfunc.argtypes = [ViSession, ViInt32, ViInt32] # noqa: F405 self.niDMM_ConfigureThermocouple_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureThermocouple_cfunc(vi, thermocouple_type, reference_junction_type) def niDMM_ConfigureTrigger(self, vi, trigger_source, trigger_delay): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureTrigger_cfunc is None: self.niDMM_ConfigureTrigger_cfunc = self._library.niDMM_ConfigureTrigger self.niDMM_ConfigureTrigger_cfunc.argtypes = [ViSession, ViInt32, ViReal64] # noqa: F405 self.niDMM_ConfigureTrigger_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureTrigger_cfunc(vi, trigger_source, trigger_delay) def niDMM_ConfigureWaveformAcquisition(self, vi, measurement_function, range, rate, waveform_points): # noqa: N802 with self._func_lock: if self.niDMM_ConfigureWaveformAcquisition_cfunc is None: self.niDMM_ConfigureWaveformAcquisition_cfunc = self._library.niDMM_ConfigureWaveformAcquisition self.niDMM_ConfigureWaveformAcquisition_cfunc.argtypes = [ViSession, ViInt32, ViReal64, ViReal64, ViInt32] # noqa: F405 self.niDMM_ConfigureWaveformAcquisition_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ConfigureWaveformAcquisition_cfunc(vi, measurement_function, range, rate, waveform_points) def niDMM_Disable(self, vi): # noqa: N802 with self._func_lock: if self.niDMM_Disable_cfunc is None: self.niDMM_Disable_cfunc = self._library.niDMM_Disable self.niDMM_Disable_cfunc.argtypes = [ViSession] # noqa: F405 self.niDMM_Disable_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_Disable_cfunc(vi) def niDMM_ExportAttributeConfigurationBuffer(self, vi, size, configuration): # noqa: N802 with 
self._func_lock: if self.niDMM_ExportAttributeConfigurationBuffer_cfunc is None: self.niDMM_ExportAttributeConfigurationBuffer_cfunc = self._library.niDMM_ExportAttributeConfigurationBuffer self.niDMM_ExportAttributeConfigurationBuffer_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViInt8)] # noqa: F405 self.niDMM_ExportAttributeConfigurationBuffer_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ExportAttributeConfigurationBuffer_cfunc(vi, size, configuration) def niDMM_ExportAttributeConfigurationFile(self, vi, file_path): # noqa: N802 with self._func_lock: if self.niDMM_ExportAttributeConfigurationFile_cfunc is None: self.niDMM_ExportAttributeConfigurationFile_cfunc = self._library.niDMM_ExportAttributeConfigurationFile self.niDMM_ExportAttributeConfigurationFile_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405 self.niDMM_ExportAttributeConfigurationFile_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ExportAttributeConfigurationFile_cfunc(vi, file_path) def niDMM_Fetch(self, vi, maximum_time, reading): # noqa: N802 with self._func_lock: if self.niDMM_Fetch_cfunc is None: self.niDMM_Fetch_cfunc = self._library.niDMM_Fetch self.niDMM_Fetch_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405 self.niDMM_Fetch_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_Fetch_cfunc(vi, maximum_time, reading) def niDMM_FetchMultiPoint(self, vi, maximum_time, array_size, reading_array, actual_number_of_points): # noqa: N802 with self._func_lock: if self.niDMM_FetchMultiPoint_cfunc is None: self.niDMM_FetchMultiPoint_cfunc = self._library.niDMM_FetchMultiPoint self.niDMM_FetchMultiPoint_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405 self.niDMM_FetchMultiPoint_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_FetchMultiPoint_cfunc(vi, maximum_time, array_size, reading_array, actual_number_of_points) def niDMM_FetchWaveform(self, vi, 
maximum_time, array_size, waveform_array, actual_number_of_points): # noqa: N802 with self._func_lock: if self.niDMM_FetchWaveform_cfunc is None: self.niDMM_FetchWaveform_cfunc = self._library.niDMM_FetchWaveform self.niDMM_FetchWaveform_cfunc.argtypes = [ViSession, ViInt32, ViInt32, ctypes.POINTER(ViReal64), ctypes.POINTER(ViInt32)] # noqa: F405 self.niDMM_FetchWaveform_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_FetchWaveform_cfunc(vi, maximum_time, array_size, waveform_array, actual_number_of_points) def niDMM_GetAttributeViBoolean(self, vi, channel_name, attribute_id, attribute_value): # noqa: N802 with self._func_lock: if self.niDMM_GetAttributeViBoolean_cfunc is None: self.niDMM_GetAttributeViBoolean_cfunc = self._library.niDMM_GetAttributeViBoolean self.niDMM_GetAttributeViBoolean_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViBoolean)] # noqa: F405 self.niDMM_GetAttributeViBoolean_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetAttributeViBoolean_cfunc(vi, channel_name, attribute_id, attribute_value) def niDMM_GetAttributeViInt32(self, vi, channel_name, attribute_id, attribute_value): # noqa: N802 with self._func_lock: if self.niDMM_GetAttributeViInt32_cfunc is None: self.niDMM_GetAttributeViInt32_cfunc = self._library.niDMM_GetAttributeViInt32 self.niDMM_GetAttributeViInt32_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViInt32)] # noqa: F405 self.niDMM_GetAttributeViInt32_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetAttributeViInt32_cfunc(vi, channel_name, attribute_id, attribute_value) def niDMM_GetAttributeViReal64(self, vi, channel_name, attribute_id, attribute_value): # noqa: N802 with self._func_lock: if self.niDMM_GetAttributeViReal64_cfunc is None: self.niDMM_GetAttributeViReal64_cfunc = self._library.niDMM_GetAttributeViReal64 self.niDMM_GetAttributeViReal64_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ctypes.POINTER(ViReal64)] # noqa: 
F405 self.niDMM_GetAttributeViReal64_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetAttributeViReal64_cfunc(vi, channel_name, attribute_id, attribute_value) def niDMM_GetAttributeViString(self, vi, channel_name, attribute_id, buffer_size, attribute_value): # noqa: N802 with self._func_lock: if self.niDMM_GetAttributeViString_cfunc is None: self.niDMM_GetAttributeViString_cfunc = self._library.niDMM_GetAttributeViString self.niDMM_GetAttributeViString_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ViAttr, ViInt32, ctypes.POINTER(ViChar)] # noqa: F405 self.niDMM_GetAttributeViString_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetAttributeViString_cfunc(vi, channel_name, attribute_id, buffer_size, attribute_value) def niDMM_GetCalDateAndTime(self, vi, cal_type, month, day, year, hour, minute): # noqa: N802 with self._func_lock: if self.niDMM_GetCalDateAndTime_cfunc is None: self.niDMM_GetCalDateAndTime_cfunc = self._library.niDMM_GetCalDateAndTime self.niDMM_GetCalDateAndTime_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32), ctypes.POINTER(ViInt32)] # noqa: F405 self.niDMM_GetCalDateAndTime_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetCalDateAndTime_cfunc(vi, cal_type, month, day, year, hour, minute) def niDMM_GetDevTemp(self, vi, options, temperature): # noqa: N802 with self._func_lock: if self.niDMM_GetDevTemp_cfunc is None: self.niDMM_GetDevTemp_cfunc = self._library.niDMM_GetDevTemp self.niDMM_GetDevTemp_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar), ctypes.POINTER(ViReal64)] # noqa: F405 self.niDMM_GetDevTemp_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetDevTemp_cfunc(vi, options, temperature) def niDMM_GetError(self, vi, error_code, buffer_size, description): # noqa: N802 with self._func_lock: if self.niDMM_GetError_cfunc is None: self.niDMM_GetError_cfunc = self._library.niDMM_GetError 
self.niDMM_GetError_cfunc.argtypes = [ViSession, ctypes.POINTER(ViStatus), ViInt32, ctypes.POINTER(ViChar)] # noqa: F405 self.niDMM_GetError_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetError_cfunc(vi, error_code, buffer_size, description) def niDMM_GetExtCalRecommendedInterval(self, vi, months): # noqa: N802 with self._func_lock: if self.niDMM_GetExtCalRecommendedInterval_cfunc is None: self.niDMM_GetExtCalRecommendedInterval_cfunc = self._library.niDMM_GetExtCalRecommendedInterval self.niDMM_GetExtCalRecommendedInterval_cfunc.argtypes = [ViSession, ctypes.POINTER(ViInt32)] # noqa: F405 self.niDMM_GetExtCalRecommendedInterval_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetExtCalRecommendedInterval_cfunc(vi, months) def niDMM_GetLastCalTemp(self, vi, cal_type, temperature): # noqa: N802 with self._func_lock: if self.niDMM_GetLastCalTemp_cfunc is None: self.niDMM_GetLastCalTemp_cfunc = self._library.niDMM_GetLastCalTemp self.niDMM_GetLastCalTemp_cfunc.argtypes = [ViSession, ViInt32, ctypes.POINTER(ViReal64)] # noqa: F405 self.niDMM_GetLastCalTemp_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetLastCalTemp_cfunc(vi, cal_type, temperature) def niDMM_GetSelfCalSupported(self, vi, self_cal_supported): # noqa: N802 with self._func_lock: if self.niDMM_GetSelfCalSupported_cfunc is None: self.niDMM_GetSelfCalSupported_cfunc = self._library.niDMM_GetSelfCalSupported self.niDMM_GetSelfCalSupported_cfunc.argtypes = [ViSession, ctypes.POINTER(ViBoolean)] # noqa: F405 self.niDMM_GetSelfCalSupported_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_GetSelfCalSupported_cfunc(vi, self_cal_supported) def niDMM_ImportAttributeConfigurationBuffer(self, vi, size, configuration): # noqa: N802 with self._func_lock: if self.niDMM_ImportAttributeConfigurationBuffer_cfunc is None: self.niDMM_ImportAttributeConfigurationBuffer_cfunc = self._library.niDMM_ImportAttributeConfigurationBuffer self.niDMM_ImportAttributeConfigurationBuffer_cfunc.argtypes 
= [ViSession, ViInt32, ctypes.POINTER(ViInt8)] # noqa: F405 self.niDMM_ImportAttributeConfigurationBuffer_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ImportAttributeConfigurationBuffer_cfunc(vi, size, configuration) def niDMM_ImportAttributeConfigurationFile(self, vi, file_path): # noqa: N802 with self._func_lock: if self.niDMM_ImportAttributeConfigurationFile_cfunc is None: self.niDMM_ImportAttributeConfigurationFile_cfunc = self._library.niDMM_ImportAttributeConfigurationFile self.niDMM_ImportAttributeConfigurationFile_cfunc.argtypes = [ViSession, ctypes.POINTER(ViChar)] # noqa: F405 self.niDMM_ImportAttributeConfigurationFile_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_ImportAttributeConfigurationFile_cfunc(vi, file_path) def niDMM_InitWithOptions(self, resource_name, id_query, reset_device, option_string, vi): # noqa: N802 with self._func_lock: if self.niDMM_InitWithOptions_cfunc is None: self.niDMM_InitWithOptions_cfunc = self._library.niDMM_InitWithOptions self.niDMM_InitWithOptions_cfunc.argtypes = [ctypes.POINTER(ViChar), ViBoolean, ViBoolean, ctypes.POINTER(ViChar), ctypes.POINTER(ViSession)] # noqa: F405 self.niDMM_InitWithOptions_cfunc.restype = ViStatus # noqa: F405 return self.niDMM_InitWithOptions_cfunc(resource_name, id_query, reset_device, option_string, vi) def niDMM_Initiate(self, vi): # noqa: N802 with self._func_lock: if self.niDMM_Initiate_cfunc is None: self.niDMM_Initiate_cfunc =
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import sys

import pandas as pd
from functools import partial
from types import SimpleNamespace

from PyQt5 import QtCore
from PyQt5.QtGui import QFont, QStandardItemModel, QStandardItem
from PyQt5.QtCore import pyqtSignal, Qt, QAbstractTableModel, QModelIndex, QRect, QVariant, QSize
from PyQt5.QtWidgets import QMainWindow, QApplication, QHBoxLayout, QWidget, QPushButton, \
    QDockWidget, QAction, qApp, QMessageBox, QDialog, QVBoxLayout, QLabel, QGroupBox, QTableWidget, \
    QTableWidgetItem, QTabWidget, QLayout, QTextEdit, QListWidget, QListWidgetItem, QMenu, QHeaderView, \
    QStyle, QStyleOptionButton, QTableView


# ----------------------------------------------------------------------------------------------------------------------

def horizon_layout(widgets: list, weights: list = None) -> QHBoxLayout:
    """Lay `widgets` out horizontally; `weights` are stretch factors (default 1 each)."""
    layout = QHBoxLayout()
    if weights is None:
        weights = []
    # Pad the weight list so every widget gets a stretch factor.
    while len(weights) < len(widgets):
        weights.append(1)
    for widget, weight in zip(widgets, weights):
        layout.addWidget(widget, weight)
    return layout


def create_v_group_box(title: str) -> (QGroupBox, QVBoxLayout):
    """Create a titled group box with a vertical layout; returns (box, layout)."""
    group_box = QGroupBox(title)
    group_layout = QVBoxLayout()
    # group_layout.addStretch(1)
    group_box.setLayout(group_layout)
    return group_box, group_layout


def create_h_group_box(title: str) -> (QGroupBox, QHBoxLayout):
    """Create a titled group box with a horizontal layout; returns (box, layout)."""
    group_box = QGroupBox(title)
    group_layout = QHBoxLayout()
    # group_layout.addStretch(1)
    group_box.setLayout(group_layout)
    return group_box, group_layout


def create_new_tab(tab_widget: QTabWidget, title: str, layout: QLayout = None) -> QLayout:
    """Append an empty tab named `title` to `tab_widget` and return its layout."""
    empty_wnd = QWidget()
    wnd_layout = layout if layout is not None else QVBoxLayout()
    empty_wnd.setLayout(wnd_layout)
    tab_widget.addTab(empty_wnd, title)
    return wnd_layout


def restore_text_editor(editor: QTextEdit):
    """Clear `editor` and reset font/colors to the application default."""
    editor.clear()
    editor.setFocus()
    font = QFont()
    font.setFamily("微软雅黑")
    font.setPointSize(10)
    editor.selectAll()
    editor.setCurrentFont(font)
    editor.setTextColor(Qt.black)
    editor.setTextBackgroundColor(Qt.white)


# Takes a df and writes it to a qtable provided. df headers become qtable headers
# From: https://stackoverflow.com/a/57225144
def write_df_to_qtable(df: pd.DataFrame, table: QTableWidget, index=False):
    headers = list(df)
    table.setRowCount(df.shape[0])
    table.setColumnCount(df.shape[1] + (1 if index else 0))
    table.setHorizontalHeaderLabels(([''] if index else []) + headers)
    # Getting data from df is computationally costly so convert it to an array first.
    df_array = df.values
    for row in range(df.shape[0]):
        for col in range(df.shape[1]):
            if col == 0 and index:
                table.setItem(row, col, QTableWidgetItem(str(df.index[row])))
            table.setItem(row, col + (1 if index else 0), QTableWidgetItem(str(df_array[row, col])))


# ----------------------------------------------------------------------------------------------------------------------
#                                                     InfoDialog
# ----------------------------------------------------------------------------------------------------------------------

class InfoDialog(QDialog):
    """Simple modal dialog showing `text` under window title `title` with an OK button."""

    def __init__(self, title, text):
        super().__init__()
        self.__text = text
        self.__title = title
        self.__button_ok = QPushButton('OK')
        self.__layout_main = QVBoxLayout()
        self.init_ui()

    def init_ui(self):
        """Build the label + OK-button layout."""
        self.setWindowTitle(self.__title)
        self.__button_ok.clicked.connect(self.on_btn_click_ok)
        self.__layout_main.addWidget(QLabel(self.__text), 1)
        self.__layout_main.addWidget(self.__button_ok)
        self.setLayout(self.__layout_main)

    def on_btn_click_ok(self):
        self.close()


# ----------------------------------------------------------------------------------------------------------------------
#                                                  DataFrameWidget
# ----------------------------------------------------------------------------------------------------------------------

class DataFrameWidget(QWidget):
    """Widget that renders a pandas DataFrame inside a table."""

    def __init__(self, df: pd.DataFrame = None):
        super(DataFrameWidget, self).__init__()
        self.__data_frame = df
        # NOTE(review): EasyQTableWidget is defined elsewhere in this module.
        self.__table_main = EasyQTableWidget()
        self.init_ui()
        self.update_table(df)

    # ---------------------------------------------------- UI Init ----------------------------------------------------

    def init_ui(self):
        self.__layout_control()
        self.__config_control()

    def __layout_control(self):
        main_layout = QVBoxLayout()
        main_layout.addWidget(self.__table_main)
        self.setLayout(main_layout)

    def __config_control(self):
        self.setMinimumSize(QSize(1000, 700))

    def update_table(self, df: pd.DataFrame):
        """Re-render the table; a None `df` keeps the previously shown frame."""
        if df is not None:
            self.__data_frame = df
        write_df_to_qtable(self.__data_frame, self.__table_main)


# ----------------------------------------------------------------------------------------------------------------------
#                                                  CommonMainWindow
# ----------------------------------------------------------------------------------------------------------------------

class CommonMainWindow(QMainWindow):
    """Base main window with a standard menu bar and dockable sub-window management.

    When `hold_menu` is True the File/View/Help menus are created as detached
    QMenu objects instead of being installed on the menu bar.
    """

    def __init__(self, hold_menu: bool = False):
        super(CommonMainWindow, self).__init__()
        self.menu_file = None
        self.menu_view = None
        self.menu_help = None
        # BUGFIX: the original hard-coded False here, silently ignoring the
        # `hold_menu` argument (the detached-menu branch was unreachable).
        self.__hold_menu = hold_menu
        self.__sub_window_table = {}
        self.common_init_ui()
        self.common_init_menu()

    # ----------------------------- Setup and UI -----------------------------

    def common_init_ui(self):
        """Window title, status bar, size, dock nesting, and centering."""
        self.setWindowTitle('Common Main Window - Sleepy')
        self.statusBar().showMessage('Ready')
        # self.showFullScreen()
        self.resize(1280, 800)
        self.setDockNestingEnabled(True)
        self.move(QApplication.desktop().screen().rect().center() - self.rect().center())

    def common_init_menu(self):
        """Create File/View/Help menus and their default actions."""
        menu_bar = self.menuBar()
        if not self.__hold_menu:
            self.menu_file = menu_bar.addMenu('File')
            self.menu_view = menu_bar.addMenu('View')
            self.menu_help = menu_bar.addMenu('Help')
        else:
            self.menu_file = QMenu('File')
            self.menu_view = QMenu('View')
            self.menu_help = QMenu('Help')

        exit_action = QAction('&Exit', self)
        exit_action.setShortcut('Ctrl+Q')
        exit_action.setStatusTip('Exit app')
        exit_action.triggered.connect(qApp.quit)
        self.menu_file.addAction(exit_action)

        help_action = QAction('&Help', self)
        help_action.setShortcut('Ctrl+H')
        help_action.setStatusTip('Open help Window')
        help_action.triggered.connect(self.on_menu_help)
        self.menu_help.addAction(help_action)

        about_action = QAction('&About', self)
        about_action.setStatusTip('Open about Window')
        about_action.triggered.connect(self.on_menu_about)
        self.menu_help.addAction(about_action)

    def get_sub_window(self, name: str) -> SimpleNamespace or None:
        """Return the SimpleNamespace registered under `name`, or None."""
        return self.__sub_window_table.get(name, None)

    def add_sub_window(self, window: QWidget, name: str, config: dict, menu: QMenu = None):
        """Wrap `window` in a dock widget and register it under `name`.

        `config` keys: DockName, DockArea, DockShow, DockFloat, MenuPresent,
        ActionTips, ActionShortcut.
        """
        sub_window_data = SimpleNamespace()
        sub_window_data.config = config
        self.__setup_sub_window_dock(window, config, sub_window_data)
        self.__setup_sub_window_menu(config, sub_window_data, menu)
        self.__setup_sub_window_action(config, sub_window_data)
        self.__sub_window_table[name] = sub_window_data

    def __setup_sub_window_dock(self, window: QWidget, config: dict, sub_window_data: SimpleNamespace):
        """Create, place, and show/hide the QDockWidget described by `config`."""
        dock_name = config.get('DockName', '')
        dock_area = config.get('DockArea', Qt.NoDockWidgetArea)
        dock_show = config.get('DockShow', False)
        dock_float = config.get('DockFloat', False)

        dock_wnd = QDockWidget(dock_name, self)
        dock_wnd.setAllowedAreas(
            Qt.RightDockWidgetArea | Qt.LeftDockWidgetArea | Qt.TopDockWidgetArea | Qt.BottomDockWidgetArea)
        # With this setting, the dock widget cannot be closed
        # dock_wnd.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable)

        if dock_area != Qt.NoDockWidgetArea:
            if dock_area == Qt.AllDockWidgetAreas:
                # "All areas" is treated as a centered floating window.
                self.addDockWidget(Qt.TopDockWidgetArea, dock_wnd)
                dock_wnd.setFloating(True)
                dock_wnd.move(QApplication.desktop().screen().rect().center() - self.rect().center())
            else:
                self.addDockWidget(dock_area, dock_wnd)
        else:
            # No dock area: float the window, centered, and forbid re-docking.
            self.addDockWidget(Qt.TopDockWidgetArea, dock_wnd)
            dock_wnd.setFloating(True)
            dock_wnd.setAllowedAreas(Qt.NoDockWidgetArea)
            dock_wnd.move(QApplication.desktop().screen().rect().center() - self.rect().center())

        dock_wnd.setWidget(window)
        if dock_float:
            dock_wnd.setFloating(True)
        if dock_show:
            dock_wnd.show()
        else:
            dock_wnd.hide()
        sub_window_data.dock_wnd = dock_wnd

    def __setup_sub_window_menu(self, config: dict, sub_window_data: SimpleNamespace, menu: QMenu):
        """Add the dock's toggle action to `menu` (or the View menu) if requested."""
        menu_present = config.get('MenuPresent', False)
        dock_wnd = sub_window_data.dock_wnd if hasattr(sub_window_data, 'dock_wnd') else None
        if menu_present and dock_wnd is not None:
            menu_view = self.menu_view if menu is None else menu
            menu_view.addAction(dock_wnd.toggleViewAction())

    def __setup_sub_window_action(self, config: dict, sub_window_data: SimpleNamespace):
        """Apply shortcut/tips to the sub-window's menu action, if one exists.

        NOTE(review): nothing visible ever sets `sub_window_data.menu_action`,
        so this is currently a no-op — confirm against the rest of the module.
        """
        action_tips = config.get('ActionTips', '')
        action_shortcut = config.get('ActionShortcut', '')
        menu_action = sub_window_data.menu_action if hasattr(sub_window_data, 'menu_action') else None
        if menu_action is not None:
            if action_shortcut != '':
                menu_action.setShortcut(action_shortcut)
            menu_action.setStatusTip(action_tips)

    # ----------------------------- UI Events -----------------------------

    def on_menu_help(self):
        # Best-effort: the help window is optional; missing `readme` is ignored.
        try:
            import readme
            help_wnd = InfoDialog('Help', readme.TEXT)
            help_wnd.exec()
        except Exception as e:
            pass
        finally:
            pass

    def on_menu_about(self):
        # Best-effort: the about box is optional; missing `readme` is ignored.
        try:
            import readme
            QMessageBox.about(self, 'About', readme.ABOUT + 'Version: ' + readme.VERSION)
        except Exception as e:
            pass
        finally:
            pass

    def closeEvent(self, event):
        """Ask for confirmation before quitting the application."""
        reply = QMessageBox.question(self,
                                     QtCore.QCoreApplication.translate('main', 'Quit'),
                                     QtCore.QCoreApplication.translate('main', 'Are you sure to quit'),
                                     QMessageBox.Yes | QMessageBox.Cancel,
                                     QMessageBox.Cancel)
        if reply == QMessageBox.Yes:
            event.accept()
            QApplication.quit()
        else:
            event.ignore()


# ----------------------------------------------------------------------------------------------------------------------
#
WrapperQDialog
# ----------------------------------------------------------------------------------------------------------------------

class WrapperQDialog(QDialog):
    """
    Wrap an arbitrary QWidget in a QDialog, optionally adding 'OK' and
    'Cancel' buttons at the bottom.

    :param wrapped_wnd: The widget to wrap.
    :param has_button: Whether the 'OK' and 'Cancel' buttons are shown.
    """

    def __init__(self, wrapped_wnd: QWidget, has_button: bool = False):
        super(WrapperQDialog, self).__init__()

        self.__is_ok = False
        self.__wrapped_wnd_destroyed = False

        self.__wrapped_wnd = wrapped_wnd
        # The wrapped widget is deleted on close; track destruction so that
        # closeEvent() never touches a dead object.
        self.__wrapped_wnd.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
        self.__wrapped_wnd.destroyed.connect(self.on_wrap_wnd_destroyed)

        self.__has_button = has_button
        self.__button_ok = QPushButton('OK')
        self.__button_cancel = QPushButton('Cancel')

        root_layout = QVBoxLayout()
        root_layout.addWidget(self.__wrapped_wnd)
        self.setWindowTitle(self.__wrapped_wnd.windowTitle())

        if has_button:
            button_row = QHBoxLayout()
            button_row.addWidget(QLabel(), 100)
            button_row.addWidget(self.__button_ok, 0)
            button_row.addWidget(self.__button_cancel, 0)
            self.__button_ok.clicked.connect(self.on_button_ok)
            self.__button_cancel.clicked.connect(self.on_button_cancel)
            root_layout.addLayout(button_row)

        self.setLayout(root_layout)
        self.setWindowFlags(int(self.windowFlags()) | Qt.WindowMinMaxButtonsHint | QtCore.Qt.WindowSystemMenuHint)

    def is_ok(self):
        """
        Check whether the user clicked the 'OK' button.

        :return: True if the dialog was closed via 'OK', else False.
        """
        return self.__is_ok

    def get_wrapped_wnd(self) -> QWidget:
        """Return the widget this dialog wraps."""
        return self.__wrapped_wnd

    def on_button_ok(self):
        self.__is_ok = True
        self.close()

    def on_button_cancel(self):
        self.close()

    def on_wrap_wnd_destroyed(self):
        self.__wrapped_wnd_destroyed = True
        self.close()

    def closeEvent(self, event):
        # Wrapped widget already gone: nothing left that could veto the close.
        if self.__wrapped_wnd_destroyed:
            event.accept()
            return
        # Otherwise let the wrapped widget decide whether closing is allowed.
        if self.__wrapped_wnd.close():
            event.accept()
        else:
            event.ignore()


# ----------------------------------------------------------------------------------------------------------------------
#                                                  EasyQTableWidget
# ----------------------------------------------------------------------------------------------------------------------

class EasyQTableWidget(QTableWidget):
    """QTableWidget helper with row-append and cell-widget conveniences."""

    def __init__(self, *__args):
        super(EasyQTableWidget, self).__init__(*__args)

    def AppendRow(self, content: [str], data: any = None):
        """Append one row of texts; *data* is attached to each cell under Qt.UserRole."""
        new_row = self.rowCount()
        self.insertRow(new_row)
        for col, text in enumerate(content):
            cell = QTableWidgetItem(text)
            cell.setData(Qt.UserRole, data)
            self.setItem(new_row, col, cell)

    def GetCurrentRow(self) -> [str]:
        """Return the display data of the selected row, or [] when none is selected."""
        selected = self.GetCurrentIndex()
        if selected == -1:
            return []
        return [self.model().index(selected, col).data() for col in range(self.columnCount())]

    def GetCurrentIndex(self) -> int:
        """Return the selected row index, or -1 when nothing is selected."""
        selection = self.selectionModel()
        return selection.currentIndex().row() if selection.hasSelection() else -1

    def AddWidgetToCell(self, row: int, col: int, widgets: [QWidget]):
        """Place a widget (or a list of widgets) inside a single table cell."""
        if not isinstance(widgets, (list, tuple)):
            widgets = [widgets]
        inner_layout = QHBoxLayout()
        inner_layout.setSpacing(0)
        inner_layout.setContentsMargins(0, 0, 0, 0)
        container = QWidget()
        container.setLayout(inner_layout)
        container.setContentsMargins(0, 0, 0, 0)
        for widget in widgets:
            inner_layout.addWidget(widget)
        self.setCellWidget(row, col, container)


#
---------------------------------------------------------------------------------------------------------------------- # EasyQListSuite # ---------------------------------------------------------------------------------------------------------------------- class EasyQListSuite(QWidget): """ Provide a window that has a QListWidget with 'Add' and 'Remove' button. """ def __init__(self, *__args): super(EasyQListSuite, self).__init__(*__args) self.__item_list = [] self.__list_main = QListWidget(self) self.__button_add = QPushButton('Add') self.__button_remove = QPushButton('Remove') self.__init_ui() self.__config_ui() def update_item(self, items: [(str, any)]): """ Specify a (key, value) tuple list. Key will be displayed as list item. Value can be retrieved by get_select_items() :param items: Specify a (key, value) tuple list. :return: None """ self.__item_list.clear() for item in items: if isinstance(item, (list, tuple)): if len(item) == 0: continue elif len(item) == 1: self.__item_list.append((str(item[0]), item[0])) else: self.__item_list.append((str(item[0]), item[1])) else: self.__item_list.append((str(item), item)) self.__update_list() def get_select_items(self) -> [any]: """ Get the value of the items that user selected. :return: The value of the items that user selected. 
""" return [item.data(Qt.UserRole) for item in self.__list_main.selectedItems()] def set_add_handler(self, handler): """ Add a handler for 'Add' button clicking :param handler: The handler that connects to the button clicked signal :return: """ self.__button_add.clicked.connect(handler) def set_remove_handler(self, handler): """ Add a handler for 'Remove' button clicking :param handler: The handler that connects to the button clicked signal :return: """ self.__button_remove.clicked.connect(handler) # ---------------------------------------- Private ---------------------------------------- def __init_ui(self): main_layout = QVBoxLayout() self.setLayout(main_layout) line_layout = QHBoxLayout() line_layout.addWidget(self.__button_add) line_layout.addWidget(self.__button_remove) main_layout.addWidget(self.__list_main) main_layout.addLayout(line_layout) def __config_ui(self): pass def __update_list(self): self.__list_main.clear() for text, obj in self.__item_list: item = QListWidgetItem() item.setText(text) item.setData(Qt.UserRole, obj) self.__list_main.addItem(item) # --------------------------------------------------- PageTableWidget -------------------------------------------------- class PageTableWidget(QWidget): def __init__(self): super(PageTableWidget, self).__init__() self.__page = 0 self.__max_page = 0 self.__item_per_page = 50 self.__max_item_count = 0 self.__table_main = EasyQTableWidget() self.__layout_bottom = QHBoxLayout() self.init_ui() def init_ui(self): self.__layout_control() self.__config_control() def __layout_control(self): main_layout = QVBoxLayout() self.setLayout(main_layout) main_layout.addWidget(self.__table_main) main_layout.addLayout(self.__layout_bottom) def __config_control(self): self.add_extra_button('<<', '<<') self.add_extra_button('<', '<') self.add_extra_button('>', '>') self.add_extra_button('>>', '>>') # -------------------------------------
or more items if len(valid_reactant_nodes) < 5000 and not force_parallel: output = [] worker(valid_reactant_nodes) comp_node_set = comp_node_set.union(set(output)) else: with mp.Manager() as manager: # Initialize output list in manager output = manager.list() # Initialize processes procs = [] for work in chunks(list(valid_reactant_nodes), proc_num): p = mp.Process(target=worker, args=(work,)) procs.append(p) p.start() # Stop workers for p in procs: p.join() # Get results comp_node_set = comp_node_set.union(set(output)) return comp_node_set def distance_to_origin(network, proc_num=1, N=-1): """ Calculates the shortest distance (number of reactions) from the starting compounds (origin) to every node up to distance N. Set N to -1 to exhaustively calculate the minimum distance to every node that is reachable. Returns two sets in a tuple: Valid compound nodes and valid reactant nodes. """ s_out("\nCalculating minimum distance of nodes to origin...\n\n") time_start = time.time() # Number of nodes for formatting L = len(network.nodes()) l = len(str(L)) # Set up counters n = 0 c = 0 rf = 0 pf = 0 rr = 0 pr = 0 # Start with no valid reactant or compound nodes valid_reactant_nodes = set([]) valid_compound_nodes = set([]) # The "previous" lists are also empty prev_vrn = list(valid_reactant_nodes) prev_vcn = list(valid_compound_nodes) # Start with no new valid reactant nodes new_vrn = set([]) while True: # Valid product nodes will be connected at the start of an expansion # cycle. 
They are however in principle identified in the previous cycle # via valid reactant nodes for r_node in new_vrn: p_node = network.successors(r_node)[0] network.node[p_node]['dist'] = n node_type = network.node[p_node]['type'] if node_type == 'pf': pf += 1 if node_type == 'pr': pr += 1 # Expand the valid compound set # When n = 0, this means the starting compounds # When n > 0, the valid compound set will be expanded new_vcn = expand_valid_compound_set(network, proc_num, new_vrn, \ valid_compound_nodes) - valid_compound_nodes valid_compound_nodes = new_vcn.union(valid_compound_nodes) new_vrn = find_valid_reactant_nodes(network, proc_num, \ valid_compound_nodes) - valid_reactant_nodes valid_reactant_nodes = new_vrn.union(valid_reactant_nodes) for node in new_vcn: network.node[node]['dist'] = n c += 1 for node in new_vrn: network.node[node]['dist'] = n node_type = network.node[node]['type'] if node_type == 'rf': rf += 1 if node_type == 'rr': rr += 1 # Nicely formatted progress output output = ''.join([ '{0:<', str(l+6), '} {1:>', str(l+5), '} {2:>', str(l+5), '} {3:>', str(l+5), '} {4:>', str(l+5), '} {5:>', str(l+5), '}' ]) print(output.format('Step ' + str(n) + ':', str(c) + ' c', str(rf) + \ ' rf', str(pf) + ' pf', str(rr) + ' rr', str(pr) + ' pr')) n += 1 no_new_vrn = set(prev_vrn) == valid_reactant_nodes no_new_vcn = set(prev_vcn) == valid_compound_nodes if no_new_vrn and no_new_vcn: # When no new valid compound or reactant nodes have been identified, # it is time to stop break else: if n > N and N !=-1: # n starts at 0 and increments by one before each round dealing # with that particular step n - stop when n exceeds the limit break prev_vrn = list(valid_reactant_nodes) prev_vcn = list(valid_compound_nodes) total_time = time.time() - time_start s_out("\nDone in %ss.\n" %str(total_time)) return (valid_compound_nodes, valid_reactant_nodes) def prune_network(network, remove_cfm=True): """ Remove all nodes that are 'unreachable' (lacking a 'dist' data key). 
Also removes CFM spectra from the mine_data dictionary by default. """ for node in network.nodes(): try: x = network.node[node]['dist'] except KeyError: network.remove_node(node) if remove_cfm: for mid in network.graph['mine_data'].keys(): if 'Neg_CFM_spectra' in network.graph['mine_data'][mid].keys(): del network.graph['mine_data'][mid]['Neg_CFM_spectra'] if 'Pos_CFM_spectra' in network.graph['mine_data'][mid].keys(): del network.graph['mine_data'][mid]['Pos_CFM_spectra'] network.graph['pruned'] = True def prepare_dictionaries(network): """Prepare dictionaries for translating KEGG IDs and Names to MINE IDs.""" network.graph['kegg2nodes'] = {} network.graph['name2nodes'] = {} for node in network.nodes(): # Only consider nodes that are reachable, i.e. have predecessor(s) if not network.predecessors(node): continue # Get associated KEGG IDs try: mid = network.node[node]['mid'] kegg_ids = network.graph['mine_data'][mid]['DB_links']['KEGG'] except KeyError: kegg_ids = [] # Get associated names try: mid = network.node[node]['mid'] names = network.graph['mine_data'][mid]['Names'] except KeyError: names = [] # Add node to set of nodes for each KEGG ID for kegg_id in kegg_ids: try: network.graph['kegg2nodes'][kegg_id].add(node) except KeyError: network.graph['kegg2nodes'][kegg_id] = set([node]) # Add node to set of nodes for each name for name in names: try: network.graph['name2nodes'][name].add(node) except KeyError: network.graph['name2nodes'][name] = set([node]) def create_SMILES_to_KEGG_dict(KEGG_dict): """Create a dictionary for translating SMILES to KEGG IDs.""" SMILES_to_KEGG = {} for KEGG_id in KEGG_dict.keys(): KEGG_comp = KEGG_dict[KEGG_id] try: SMILES = KEGG_comp['SMILES'] except KeyError: continue try: SMILES_to_KEGG[SMILES].add(KEGG_id) except KeyError: SMILES_to_KEGG[SMILES] = {KEGG_id} return SMILES_to_KEGG def MINE_comps_KEGG_filter(comps, SMILES_to_KEGG): """Remove compounds that have no or cannot be assigned a KEGG ID.""" comps_filtered = [] for comp in 
comps: try: KEGG_ids = comp['DB_links']['KEGG'] except KeyError: KEGG_ids = [] try: SMILES = comp['SMILES'] except KeyError: SMILES = '' if KEGG_ids: comps_filtered.append(comp) continue if SMILES in SMILES_to_KEGG.keys(): KEGG_ids = sorted(list(SMILES_to_KEGG[SMILES])) try: comp['DB_links']['KEGG'] = KEGG_ids except KeyError: comp['DB_links'] = {'KEGG' : KEGG_ids} comps_filtered.append(comp) return comps_filtered def operators_identical(op1, op2): """Checks if two BNICE EC operators are eachother's reverse/identical.""" for e1, e2 in zip(op1.split('.'), op2.split('.')): if e1.lstrip('-') != e2.lstrip('-'): return False return True def extract_ints(str_list): int_list = [] for s in str_list: try: int_list.append(int(s)) except ValueError: continue return int_list def remove_redundant_MINE_rxns(rxns): """Removes identical but reversed MINE reactions.""" discarded_rxns = set() # Identify all operators all_operators = set() p = Progress(max_val=len(rxns), design='p') n = 0 for rxn in rxns: n += 1 s_out("\rIdentifying operators... %s" % p.to_string(n)) for operator in rxn['Operators']: all_operators.add(operator) s_out("\rIdentifying operators... Done. \n") # Identify operators that have a reverse operators_with_reverse = set() for op1 in all_operators: for op2 in all_operators - set([op1]): if operators_identical(op1, op2): operators_with_reverse.add(op1) # Reduce the reactions to those in which all operators have a reverse rxns_red = [] p = Progress(max_val=len(rxns), design='p') n = 0 for rxn in enumerate(rxns): n += 1 s_out("\rIdentifying redundancy candidates... %s" % p.to_string(n)) add_rxn = True for operator in rxn[1]['Operators']: if operator not in operators_with_reverse: add_rxn = False break if add_rxn: rxns_red.append(rxn) s_out("\rIdentifying possibly redundant reactions... Done. 
\n") # Set up progress indicator p = Progress(max_val = len(rxns_red)**2, design='pt') n = 0 # Iterate over all reaction pairs for rp in product(rxns_red, rxns_red): # Report progress n += 1 s_out("\rRemoving redundant MINE reactions... %s" % p.to_string(n)) # Don't compare a reaction to itself if rp[0][0] == rp[1][0]: continue # Don't perform further comparisons for discarded reactions if rp[0][0] in discarded_rxns or rp[1][0] in discarded_rxns: continue # Compare the Products and reactants try: r1p = set([tuple(c) for c in rp[0][1]['Products']]) except KeyError: r1p = set() try: r2p = set([tuple(c) for c in rp[1][1]['Products']]) except KeyError: r2p = set() try: r1r = set([tuple(c) for c in rp[0][1]['Reactants']]) except KeyError: r1r = set() try: r2r = set([tuple(c) for c in rp[1][1]['Reactants']]) except KeyError: r2r = set() if r1p == r2r and r2p == r1r: are_mutual_reverse = True else: are_mutual_reverse = False # Compare the sets of operators ops1 = rp[0][1]['Operators'] ops2 = rp[1][1]['Operators'] n_identical = 0 for op_pair in product(ops1, ops2): if operators_identical(*op_pair): n_identical += 1 # If the reactions have the same operators and are eachothers reverse, # determine which one to discard if n_identical == len(ops1) == len(ops2) and are_mutual_reverse: # One reaction needs to be discarded; Find which one to keep ops1_int = (extract_ints(op.split('.')) for op in ops1) ops2_int = (extract_ints(op.split('.')) for op in ops2) ops1_neg = sum(any(x < 0 for x in op) for op in ops1_int) ops2_neg = sum(any(x < 0 for x in op) for op in ops2_int) # Discard the reaction with most negative operators if ops1_neg < ops2_neg: discarded_rxns.add(rp[1][0]) elif ops1_neg > ops2_neg: discarded_rxns.add(rp[0][0]) # Otherwise discard the second reaction else: if rp[0][0] < rp[1][0]: discarded_rxns.add(rp[1][0]) else: discarded_rxns.add(rp[0][0]) # Return reactions that were not discarded print("") return [rxns[i] for i in range(len(rxns)) if i not in discarded_rxns] 
def remove_non_KEGG_MINE_rxns(rxns, comps): """Return reactions with only KEGG compounds.""" filtered_rxns = [] allowed_ids = set([c['_id'] for c in comps]) p = Progress(max_val = len(rxns), design = 'p') n = 0 for rxn in rxns: n += 1 s_out("\rRemoving non-KEGG MINE reactions... %s" % p.to_string(n)) if set(extract_reaction_comp_ids(rxn)).issubset(allowed_ids): filtered_rxns.append(rxn) print("") return filtered_rxns def KEGG_rxns_from_MINE_rxns(rxns, comps, KEGG_comp_ids): """Produce reactions with KEGG IDs instead of MINE
#!/usr/bin/env python """Resource Registry implementation""" __author__ = '<NAME>' from pyon.core import bootstrap from pyon.core.bootstrap import IonObject, CFG from pyon.core.exception import BadRequest, NotFound, Inconsistent from pyon.core.object import IonObjectBase from pyon.core.registry import getextends from pyon.datastore.datastore import DataStore from pyon.datastore.datastore_query import DatastoreQueryBuilder, DQ from pyon.ion.event import EventPublisher from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id from pyon.ion.resource import LCS, LCE, PRED, RT, AS, OT, get_restype_lcsm, is_resource, ExtendedResourceContainer, \ lcstate, lcsplit, Predicates, create_access_args from pyon.ion.process import get_ion_actor_id from pyon.util.containers import get_ion_ts from pyon.util.log import log from interface.objects import Attachment, AttachmentType, ResourceModificationType class ResourceRegistry(object): """ Class that uses a datastore to provide a resource registry. The resource registry adds knowledge of resource objects and associations. Resources have lifecycle state. Add special treatment of Attachment resources """ DEFAULT_ATTACHMENT_NAME = 'resource.attachment' def __init__(self, datastore_manager=None, container=None): self.container = container or bootstrap.container_instance # Get an instance of datastore configured as resource registry. datastore_manager = datastore_manager or self.container.datastore_manager self.rr_store = datastore_manager.get_datastore(DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES) self.name = 'container_resource_registry' self.id = 'container_resource_registry' self.event_pub = EventPublisher() self.superuser_actors = None def start(self): pass def stop(self): self.close() def close(self): """ Pass-through method to close the underlying datastore. 
""" self.rr_store.close() # ------------------------------------------------------------------------- # Resource object manipulation def create(self, object=None, actor_id=None, object_id=None, attachments=None): """ Accepts object that is to be stored in the data store and tags them with additional data (timestamp and such) If actor_id is provided, creates hasOwner association with objects. If attachments are provided (in dict(att1=dict(data=xyz), att2=dict(data=aaa, content_type='text/plain') form) they get attached to the object. Returns a tuple containing object and revision identifiers. """ if object is None: raise BadRequest("Object not present") if not isinstance(object, IonObjectBase): raise BadRequest("Object is not an IonObject") if not is_resource(object): raise BadRequest("Object is not a Resource") if "_id" in object: raise BadRequest("Object must not contain _id") if "_rev" in object: raise BadRequest("Object must not contain _rev") lcsm = get_restype_lcsm(object.type_) object.lcstate = lcsm.initial_state if lcsm else LCS.DEPLOYED object.availability = lcsm.initial_availability if lcsm else AS.AVAILABLE cur_time = get_ion_ts() object.ts_created = cur_time object.ts_updated = cur_time if object_id is None: new_res_id = create_unique_resource_id() else: new_res_id = object_id res = self.rr_store.create(object, new_res_id, attachments=attachments) res_id, rev = res if actor_id and actor_id != 'anonymous': log.debug("Associate resource_id=%s with owner=%s", res_id, actor_id) self.create_association(res_id, PRED.hasOwner, actor_id) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=res_id, origin_type=object.type_, sub_type="CREATE", mod_type=ResourceModificationType.CREATE) return res def create_mult(self, res_list, actor_id=None): """Creates a list of resources from objects. Objects may have _id in it to predetermine their ID. 
Returns a list of 2-tuples (resource_id, rev)""" cur_time = get_ion_ts() id_list = [] for resobj in res_list: lcsm = get_restype_lcsm(resobj.type_) resobj.lcstate = lcsm.initial_state if lcsm else LCS.DEPLOYED resobj.availability = lcsm.initial_availability if lcsm else AS.AVAILABLE resobj.ts_created = cur_time resobj.ts_updated = cur_time id_list.append(resobj._id if "_id" in resobj else create_unique_resource_id()) res = self.rr_store.create_mult(res_list, id_list, allow_ids=True) rid_list = [(rid, rrv) for success, rid, rrv in res] # Associations with owners if actor_id and actor_id != 'anonymous': assoc_list = [] for resobj, (rid, rrv) in zip(res_list, rid_list): resobj._id = rid assoc_list.append((resobj, PRED.hasOwner, actor_id)) self.create_association_mult(assoc_list) # Publish events for resobj, (rid, rrv) in zip(res_list, rid_list): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=rid, origin_type=resobj.type_, mod_type=ResourceModificationType.CREATE) return rid_list def read(self, object_id='', rev_id=''): if not object_id: raise BadRequest("The object_id parameter is an empty string") return self.rr_store.read(object_id, rev_id) def read_mult(self, object_ids=None, strict=True): """ @param object_ids a list of resource ids (can be empty) @param strict a bool - if True (default), raise a NotFound in case one of the resources was not found Returns resource objects for given list of resource ids in the same order. If a resource object was not found, contains None (unless strict==True) in which case NotFound will be raised. 
""" if object_ids is None: raise BadRequest("The object_ids parameter is empty") return self.rr_store.read_mult(object_ids, strict=strict) def update(self, object): if object is None: raise BadRequest("Object not present") if not hasattr(object, "_id") or not hasattr(object, "_rev"): raise BadRequest("Object does not have required '_id' or '_rev' attribute") # Do an check whether LCS has been modified res_obj = self.read(object._id) object.ts_updated = get_ion_ts() if res_obj.lcstate != object.lcstate or res_obj.availability != object.availability: log.warn("Cannot modify %s life cycle state or availability in update current=%s/%s given=%s/%s. " + "DO NOT REUSE THE SAME OBJECT IN CREATE THEN UPDATE", type(res_obj).__name__, res_obj.lcstate, res_obj.availability, object.lcstate, object.availability) object.lcstate = res_obj.lcstate object.availability = res_obj.availability self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=object._id, origin_type=object.type_, sub_type="UPDATE", mod_type=ResourceModificationType.UPDATE) return self.rr_store.update(object) def delete(self, object_id='', del_associations=False): res_obj = self.read(object_id) if not res_obj: raise NotFound("Resource %s does not exist" % object_id) if not del_associations: self._delete_owners(object_id) if del_associations: assoc_ids = self.find_associations(anyside=object_id, id_only=True) self.rr_store.delete_doc_mult(assoc_ids, object_type="Association") #log.debug("Deleted %s associations for resource %s", len(assoc_ids), object_id) elif self._is_in_association(object_id): log.warn("Deleting object %s that still has associations" % object_id) res = self.rr_store.delete(object_id) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceModifiedEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="DELETE", mod_type=ResourceModificationType.DELETE) return res def _delete_owners(self, resource_id): # Delete 
all owner users. owners, assocs = self.rr_store.find_objects(resource_id, PRED.hasOwner, RT.ActorIdentity, id_only=True) for aid in assocs: self.delete_association(aid) def retire(self, resource_id): return self.execute_lifecycle_transition(resource_id, LCE.RETIRE) def lcs_delete(self, resource_id): """ This is the official "delete" for resource objects: they are set to DELETED lcstate. All associations are set to deleted as well. """ res_obj = self.read(resource_id) old_state = res_obj.lcstate if old_state == LCS.DELETED: raise BadRequest("Resource id=%s already DELETED" % (resource_id)) res_obj.lcstate = LCS.DELETED res_obj.ts_updated = get_ion_ts() updres = self.rr_store.update(res_obj) log.debug("retire(res_id=%s). Change %s_%s to %s_%s", resource_id, old_state, res_obj.availability, res_obj.lcstate, res_obj.availability) assocs = self.find_associations(anyside=resource_id, id_only=False) for assoc in assocs: assoc.retired = True # retired means soft deleted if assocs: self.rr_store.update_mult(assocs) log.debug("lcs_delete(res_id=%s). 
Retired %s associations", resource_id, len(assocs)) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_state, availability_before=res_obj.availability) def execute_lifecycle_transition(self, resource_id='', transition_event=''): if transition_event == LCE.DELETE: return self.lcs_delete(resource_id) res_obj = self.read(resource_id) old_lcstate = res_obj.lcstate old_availability = res_obj.availability if transition_event == LCE.RETIRE: if res_obj.lcstate == LCS.RETIRED or res_obj.lcstate == LCS.DELETED: raise BadRequest("Resource id=%s, type=%s, lcstate=%s, availability=%s has no transition for event %s" % ( resource_id, res_obj.type_, old_lcstate, old_availability, transition_event)) res_obj.lcstate = LCS.RETIRED else: restype = res_obj.type_ restype_workflow = get_restype_lcsm(restype) if not restype_workflow: raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype)) new_lcstate = restype_workflow.get_lcstate_successor(old_lcstate, transition_event) new_availability = restype_workflow.get_availability_successor(old_availability, transition_event) if not new_lcstate and not new_availability: raise BadRequest("Resource id=%s, type=%s, lcstate=%s, availability=%s has no transition for event %s" % ( resource_id, restype, old_lcstate, old_availability, transition_event)) if new_lcstate: res_obj.lcstate = new_lcstate if new_availability: res_obj.availability = new_availability res_obj.ts_updated = get_ion_ts() self.rr_store.update(res_obj) log.debug("execute_lifecycle_transition(res_id=%s, event=%s). 
Change %s_%s to %s_%s", resource_id, transition_event, old_lcstate, old_availability, res_obj.lcstate, res_obj.availability) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_lcstate, availability_before=old_availability, transition_event=transition_event) return "%s_%s" % (res_obj.lcstate, res_obj.availability) def set_lifecycle_state(self, resource_id='', target_lcstate=''): """Sets the lifecycle state (if possible) to the target state. Supports compound states""" if not target_lcstate: raise BadRequest("Bad life-cycle state %s" % target_lcstate) if target_lcstate.startswith(LCS.DELETED): self.lcs_delete(resource_id) if target_lcstate.startswith(LCS.RETIRED): self.execute_lifecycle_transition(resource_id, LCE.RETIRE) res_obj = self.read(resource_id) old_lcstate = res_obj.lcstate old_availability = res_obj.availability restype = res_obj.type_ restype_workflow = get_restype_lcsm(restype) if not restype_workflow: raise BadRequest("Resource id=%s type=%s has no lifecycle" % (resource_id, restype)) if '_' in target_lcstate: # Support compound target_lcs, target_av = lcsplit(target_lcstate) if target_lcs not in LCS: raise BadRequest("Unknown life-cycle state %s" % target_lcs) if target_av and target_av not in AS: raise BadRequest("Unknown life-cycle availability %s" % target_av) elif target_lcstate in LCS: target_lcs, target_av = target_lcstate, res_obj.availability elif target_lcstate in AS: target_lcs, target_av = res_obj.lcstate, target_lcstate else: raise BadRequest("Unknown life-cycle state %s" % target_lcstate) # Check that target state is allowed lcs_successors = restype_workflow.get_lcstate_successors(old_lcstate) av_successors = restype_workflow.get_availability_successors(old_availability) 
found_lcs, found_av = target_lcs in lcs_successors.values(), target_av in av_successors.values() if not found_lcs and not found_av: raise BadRequest("Target state %s not reachable for resource in state %s_%s" % ( target_lcstate, old_lcstate, old_availability)) res_obj.lcstate = target_lcs res_obj.availability = target_av res_obj.ts_updated = get_ion_ts() updres = self.rr_store.update(res_obj) log.debug("set_lifecycle_state(res_id=%s, target=%s). Change %s_%s to %s_%s", resource_id, target_lcstate, old_lcstate, old_availability, res_obj.lcstate, res_obj.availability) if self.container.has_capability(self.container.CCAP.EVENT_PUBLISHER): self.event_pub.publish_event(event_type="ResourceLifecycleEvent", origin=res_obj._id, origin_type=res_obj.type_, sub_type="%s.%s" % (res_obj.lcstate, res_obj.availability), lcstate=res_obj.lcstate, availability=res_obj.availability, lcstate_before=old_lcstate, availability_before=old_availability) # ------------------------------------------------------------------------- # Attachment operations def create_attachment(self, resource_id='', attachment=None, actor_id=None): """ Creates an Attachment resource from given argument and associates it with the given resource. @retval the resource ID for the attachment resource. """ if attachment is None: raise BadRequest("Object not present") if not isinstance(attachment, Attachment): raise BadRequest("Object is not an Attachment") attachment.object_id = resource_id if resource_id else "" attachment.attachment_size = -1 attachment_content = None if attachment.attachment_type == AttachmentType.BLOB: if type(attachment.content) is
<reponame>ShubhamPandey28/sunpy """ Common solar physics coordinate systems. This submodule implements various solar physics coordinate frames for use with the `astropy.coordinates` module. """ import numpy as np import astropy.units as u from astropy.coordinates import Attribute, ConvertError from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping from astropy.coordinates.representation import (CartesianRepresentation, SphericalRepresentation, CylindricalRepresentation, UnitSphericalRepresentation) from sunpy.sun.constants import radius as _RSUN from .frameattributes import TimeFrameAttributeSunPy, ObserverCoordinateAttribute __all__ = ['HeliographicStonyhurst', 'HeliographicCarrington', 'Heliocentric', 'Helioprojective'] class SunPyBaseCoordinateFrame(BaseCoordinateFrame): """ * Defines a default longitude wrap angle of 180 degrees, which can be overridden by the class variable `_wrap_angle`. * Inject a nice way of representing the object which the coordinate represents. """ _wrap_angle = 180*u.deg def __init__(self, *args, **kwargs): self.object_name = None # If wrap_longitude=False is passed in, do not impose a specific wrap angle for the frame if not kwargs.pop('wrap_longitude', True): self._wrap_angle = None return super().__init__(*args, **kwargs) def represent_as(self, base, s='base', in_frame_units=False): """ If a frame wrap angle is set, use that wrap angle for any spherical representations. """ data = super().represent_as(base, s, in_frame_units=in_frame_units) if self._wrap_angle is not None and \ isinstance(data, (UnitSphericalRepresentation, SphericalRepresentation)): data.lon.wrap_angle = self._wrap_angle return data def __str__(self): """ We override this here so that when you print a SkyCoord it shows the observer as the string and not the whole massive coordinate. 
""" if getattr(self, "object_name", None): return f"<{self.__class__.__name__} Coordinate for '{self.object_name}'>" else: return super().__str__() class HeliographicStonyhurst(SunPyBaseCoordinateFrame): """ A coordinate or frame in the Stonyhurst Heliographic system. In a cartesian representation this is also known as the Heliocentric Earth Equatorial (HEEQ) system. This frame has its origin at the solar centre and the north pole above the solar north pole, and the zero line on longitude pointing towards the Earth. A new instance can be created using the following signatures (note that all the arguments must be supplied as keywords):: HeliographicStonyhurst(lon, lat, obstime) HeliographicStonyhurst(lon, lat, radius, obstime) HeliographicStonyhurst(x, y, z, obstime, representation_type='cartesian') Parameters ---------- representation : `~astropy.coordinates.BaseRepresentation` or `None` A representation object or None to have no data. lon : `~astropy.coordinates.Angle`, optional The longitude for this object (``lat`` must also be given and ``representation`` must be None). lat : `~astropy.coordinates.Angle`, optional The latitude for this object (``lon`` must also be given and ``representation`` must be None). radius : `~astropy.units.Quantity`, optional This quantity holds the radial distance. If not specified, it is, by default, the radius of the photosphere. x : `~astropy.units.Quantity`, optional x coordinate. y : `~astropy.units.Quantity`, optional y coordinate. z : `~astropy.units.Quantity`, optional z coordinate. obstime: `~sunpy.time.Time` The date and time of the observation, used to convert to heliographic carrington coordinates. Examples -------- >>> from astropy.coordinates import SkyCoord >>> import sunpy.coordinates >>> import astropy.units as u >>> sc = SkyCoord(1*u.deg, 1*u.deg, 2*u.km, ... frame="heliographic_stonyhurst", ... 
obstime="2010/01/01T00:00:45") >>> sc <SkyCoord (HeliographicStonyhurst: obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km) (1., 1., 2.)> >>> sc.frame <HeliographicStonyhurst Coordinate (obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km) (1., 1., 2.)> >>> sc = SkyCoord(HeliographicStonyhurst(-10*u.deg, 2*u.deg)) >>> sc <SkyCoord (HeliographicStonyhurst: obstime=None): (lon, lat, radius) in (deg, deg, km) (-10., 2., 695700.)> Notes ----- This frame will always be converted a 3D frame where the radius defaults to rsun. """ name = "heliographic_stonyhurst" default_representation = SphericalRepresentation frame_specific_representation_info = { SphericalRepresentation: [RepresentationMapping(reprname='lon', framename='lon', defaultunit=u.deg), RepresentationMapping(reprname='lat', framename='lat', defaultunit=u.deg), RepresentationMapping(reprname='distance', framename='radius', defaultunit=None)], CartesianRepresentation: [RepresentationMapping(reprname='x', framename='x'), RepresentationMapping(reprname='y', framename='y'), RepresentationMapping(reprname='z', framename='z')] } obstime = TimeFrameAttributeSunPy() def __init__(self, *args, **kwargs): _rep_kwarg = kwargs.get('representation_type', None) if ('radius' in kwargs and kwargs['radius'].unit is u.one and u.allclose(kwargs['radius'], 1*u.one)): kwargs['radius'] = _RSUN.to(u.km) super().__init__(*args, **kwargs) # Make 3D if specified as 2D # If representation was explicitly passed, do not change the rep. if not _rep_kwarg: # If we were passed a 3D rep extract the distance, otherwise # calculate it from _RSUN. if isinstance(self._data, UnitSphericalRepresentation): distance = _RSUN.to(u.km) self._data = SphericalRepresentation(lat=self._data.lat, lon=self._data.lon, distance=distance) class HeliographicCarrington(HeliographicStonyhurst): """ A coordinate or frame in the Carrington Heliographic system. 
- The origin is the centre of the Sun - The z-axis is aligned with the Sun's north pole - The x and y axes rotate with a period of 25.38 days. The line of zero longitude passed through the disk centre as seen from Earth at 21:36 on 9th Nov 1853. This frame differs from the Stonyhurst version in the definition of the longitude, which is defined using the time-dependant offset described above. Parameters ---------- representation: `~astropy.coordinates.BaseRepresentation` or None. A representation object. If specified, other parameters must be in keyword form. lon: `Angle` object. The longitude for this object (``lat`` must also be given and ``representation`` must be None). lat: `Angle` object. The latitude for this object (``lon`` must also be given and ``representation`` must be None). radius: `astropy.units.Quantity` object, optional, must be keyword. This quantity holds the radial distance. Defaults to the solar radius. obstime: SunPy Time The date and time of the observation, used to convert to heliographic carrington coordinates. Examples -------- >>> from astropy.coordinates import SkyCoord >>> import sunpy.coordinates >>> import astropy.units as u >>> sc = SkyCoord(1*u.deg, 2*u.deg, 3*u.km, ... frame="heliographic_carrington", ... obstime="2010/01/01T00:00:30") >>> sc <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:30.000): (lon, lat, radius) in (deg, deg, km) (1., 2., 3.)> >>> sc = SkyCoord([1,2,3]*u.deg, [4,5,6]*u.deg, [5,6,7]*u.km, ... 
obstime="2010/01/01T00:00:45", frame="heliographic_carrington") >>> sc <SkyCoord (HeliographicCarrington: obstime=2010-01-01T00:00:45.000): (lon, lat, radius) in (deg, deg, km) [(1., 4., 5.), (2., 5., 6.), (3., 6., 7.)]> """ name = "heliographic_carrington" default_representation = SphericalRepresentation frame_specific_representation_info = { SphericalRepresentation: [RepresentationMapping(reprname='lon', framename='lon', defaultunit=u.deg), RepresentationMapping(reprname='lat', framename='lat', defaultunit=u.deg), RepresentationMapping(reprname='distance', framename='radius', defaultunit=None)], UnitSphericalRepresentation: [RepresentationMapping(reprname='lon', framename='lon', defaultunit=u.deg), RepresentationMapping(reprname='lat', framename='lat', defaultunit=u.deg)], } _wrap_angle = 360*u.deg obstime = TimeFrameAttributeSunPy() class Heliocentric(SunPyBaseCoordinateFrame): """ A coordinate or frame in the Heliocentric system. - The origin is the centre of the Sun - The z-axis points from the centre of the Sun to the observer. - The y-axis is perpendicular to the z-axis, and lies in the plane that contains the z-axis and the solar rotation axis, pointing towards the Sun's north pole. This frame may either be specified in Cartesian or cylindrical representation. Cylindrical representation replaces (x, y) with (rho, psi) where rho is the impact parameter and psi is the position angle in degrees. Parameters ---------- representation: `~astropy.coordinates.BaseRepresentation` or None. A representation object. If specified, other parameters must be in keyword form and if x, y and z are specified, it must be None. x: `Quantity` object. X-axis coordinate, optional, must be keyword. y: `Quantity` object. Y-axis coordinate, optional, must be keyword. z: `Quantity` object. Shared by both representations. Z-axis coordinate, optional, must be keyword. 
observer: `~sunpy.coordinates.frames.HeliographicStonyhurst`, optional The coordinate of the observer in the solar system. Defaults to the Earth. obstime: SunPy Time The date and time of the observation, used to convert to heliographic carrington coordinates. Examples -------- >>> from astropy.coordinates import SkyCoord, CartesianRepresentation >>> import sunpy.coordinates >>> import astropy.units as u >>> sc = SkyCoord(CartesianRepresentation(10*u.km, 1*u.km, 2*u.km), ... obstime="2011/01/05T00:00:50", frame="heliocentric") >>> sc <SkyCoord (Heliocentric: obstime=2011-01-05T00:00:50.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in km (10., 1., 2.)> >>> sc = SkyCoord([1,2]*u.km, [3,4]*u.m, [5,6]*u.cm, frame="heliocentric", obstime="2011/01/01T00:00:54") >>> sc <SkyCoord (Heliocentric: obstime=2011-01-01T00:00:54.000, observer=<HeliographicStonyhurst Coordinate for 'earth'>): (x, y, z) in (km, m, cm) [(1., 3., 5.), (2., 4., 6.)]> """ default_representation = CartesianRepresentation _frame_specific_representation_info = { CylindricalRepresentation: [RepresentationMapping('phi', 'psi', u.deg)] } obstime = TimeFrameAttributeSunPy() observer = ObserverCoordinateAttribute(HeliographicStonyhurst, default="earth") class Helioprojective(SunPyBaseCoordinateFrame): """ A coordinate or frame in the Helioprojective (Cartesian) system. This is a projective coordinate system centered around the observer. It is a full spherical coordinate system with position given as longitude theta_x and latitude theta_y. Parameters ---------- representation: `~astropy.coordinates.BaseRepresentation` or None. A representation object. If specified, other parameters must be in keyword form. Tx: `~astropy.coordinates.Angle` or `~astropy.units.Quantity` X-axis coordinate. Ty: `~astropy.coordinates.Angle` or `~astropy.units.Quantity` Y-axis coordinate. distance: `~astropy.units.Quantity` The radial distance from the observer to the coordinate point. 
obstime: SunPy Time The date and time of the observation, used to convert to heliographic carrington coordinates. observer: `~sunpy.coordinates.frames.HeliographicStonyhurst`, str The coordinate of the observer in the solar system. If you supply a string, it must be a solar system body that can be parsed by `~sunpy.coordinates.ephemeris.get_body_heliographic_stonyhurst`. rsun: `~astropy.units.Quantity`
warnings.warn('as_pandas_df is deprecated and will be removed in future release, '
                      'please use "to_pandas" method', FutureWarning, stacklevel=2)
        # NOTE(review): tail of as_pandas_df - its head lies outside this chunk.
        return self.to_pandas()

    @classmethod
    def from_pandas(cls, df: 'pd.DataFrame', tz: str = 'UTC') -> 'Dataset':
        """
        Creates a riptable Dataset from a pandas DataFrame. Pandas categoricals and datetime arrays are
        converted to their riptable counterparts. Any timezone-unaware datetime arrays (or those using a
        timezone not recognized by riptable) are localized to the timezone specified by the tz parameter.

        Recognized pandas timezones: UTC, GMT, US/Eastern, and Europe/Dublin

        Parameters
        ----------
        df: pandas.DataFrame
            The pandas DataFrame to be converted
        tz: string
            A riptable-supported timezone ('UTC', 'NYC', 'DUBLIN', 'GMT') as fallback timezone.

        Returns
        -------
        riptable.Dataset

        See Also
        --------
        riptable.Dataset.to_pandas
        """
        # Deferred import: pandas is an optional dependency of this conversion only.
        import pandas as pd

        data = {}
        for key in df.columns:
            col = df[key]
            dtype = col.dtype
            dtype_kind = dtype.kind
            iscat = False
            # Old pandas versions have no CategoricalDtype class; dtype.num == 100 was
            # the legacy marker for a categorical dtype.
            if hasattr(pd, 'CategoricalDtype'):
                iscat = isinstance(dtype, pd.CategoricalDtype)
            else:
                iscat = dtype.num == 100
            if iscat or isinstance(col, pd.Categorical):
                codes = col.cat.codes
                categories = col.cat.categories
                # check for newer version of pandas
                if hasattr(codes, 'to_numpy'):
                    codes = codes.to_numpy()
                    categories = categories.to_numpy()
                else:
                    codes = np.asarray(codes)
                    categories = np.asarray(categories)
                # riptable Categorical codes are shifted by +1 (0 is the invalid bin),
                # which also maps pandas' NaN code of -1 onto 0.
                data[key] = TypeRegister.Categorical(codes + 1, categories=categories)
            elif hasattr(pd, 'Int8Dtype') and \
                    isinstance(dtype, (pd.Int8Dtype, pd.Int16Dtype, pd.Int32Dtype, pd.Int64Dtype,
                                       pd.UInt8Dtype, pd.UInt16Dtype, pd.UInt32Dtype, pd.UInt64Dtype)):
                # Nullable integer columns: replace NA with riptable's invalid sentinel
                # for the matching numpy dtype before converting.
                data[key] = np.asarray(col.fillna(INVALID_DICT[dtype.numpy_dtype.num]),
                                       dtype=dtype.numpy_dtype)
            elif dtype_kind == 'M':
                # datetime64 column: map the pandas timezone to a riptable one, falling
                # back to the caller-supplied tz for timezone-naive data.
                try:
                    ptz = str(dtype.tz)
                    try:
                        _tz = _PANDAS_TO_RIPTABLE_TZ[ptz]
                    except KeyError:
                        raise ValueError(
                            "Unable to convert a datetime array with timezone={}".format(ptz))
                except AttributeError:
                    # dtype has no .tz attribute -> timezone-naive datetimes.
                    _tz = tz
                # NOTE(review): naive datetimes are interpreted as UTC here (from_tz='UTC')
                # rather than as wall-clock time in _tz - confirm this is intended.
                data[key] = TypeRegister.DateTimeNano(np.asarray(col, dtype='i8'), from_tz='UTC', to_tz=_tz)
            elif dtype_kind == 'm':
                # timedelta64 column
                data[key] = TypeRegister.TimeSpan(np.asarray(col, dtype='i8'))
            elif dtype_kind == 'O':
                if len(col) > 0:
                    first_element = col.iloc[0]
                    if isinstance(first_element, (int, float, np.number)):
                        # An object array with number (int or float) in it probably means there is
                        # NaN in it so convert to float64.
                        new_col = np.asarray(col, dtype='f8')
                    else:
                        try:
                            new_col = np.asarray(col, dtype='S')
                        except UnicodeEncodeError:
                            # Non-ASCII strings: fall back to unicode.
                            new_col = np.asarray(col, dtype='U')
                else:
                    new_col = np.asarray(col, dtype='S')
                data[key] = new_col
            else:
                # Plain numeric/bool columns pass through unchanged.
                data[key] = df[key]
        return cls(data)

    @staticmethod
    def from_arrow(
        tbl: 'pa.Table', zero_copy_only: bool = True, writable: bool = False,
        auto_widen: bool = False, fill_value: Optional[Mapping[str, Any]] = None
    ) -> 'Dataset':
        """
        Convert a pyarrow `Table` to a riptable `Dataset`.

        Parameters
        ----------
        tbl : pyarrow.Table
        zero_copy_only : bool, default True
            If True, an exception will be raised if the conversion to a `FastArray`
            would require copying the underlying data (e.g. in presence of nulls, or for
            non-primitive types).
        writable : bool, default False
            For `FastArray`s created with zero copy (view on the Arrow data), the
            resulting array is not writable (Arrow data is immutable). By setting this to
            True, a copy of the array is made to ensure it is writable.
        auto_widen : bool, optional, default to False
            When False (the default), if an arrow array contains a value which would be considered
            the 'invalid'/NA value for the equivalent dtype in a `FastArray`, raise an exception.
            When True, the converted array
        fill_value : Mapping[str, int or float or str or bytes or bool], optional, defaults to None
            Optional mapping providing non-default fill values to be used. May specify as many
            or as few columns as the caller likes.
When None (or for any columns which don't have a fill value specified in the mapping) the riptable invalid value for the column (given it's dtype) will be used. Returns ------- Dataset Notes ----- This function does not currently support pyarrow's nested Tables. A future version of riptable may support nested Datasets in the same way (where a Dataset contains a mixture of arrays/columns or nested Datasets having the same number of rows), which would make it trivial to support that conversion. """ import pyarrow as pa ds_cols = {} for col_name, col in zip(tbl.column_names, tbl.columns): if isinstance(col, (pa.Array, pa.ChunkedArray)): rt_arr = FastArray.from_arrow(col, zero_copy_only=zero_copy_only, writable=writable, auto_widen=auto_widen) else: # Unknown/unsupported type being used as a column -- can't convert. raise RuntimeError(f"Unable to convert column '{col_name}' from object of type '{type(col)}'.") ds_cols[col_name] = rt_arr return Dataset(ds_cols) def to_arrow(self, *, preserve_fixed_bytes: bool = False, empty_strings_to_null: bool = True) -> 'pa.Table': """ Convert a riptable `Dataset` to a pyarrow `Table`. Parameters ---------- preserve_fixed_bytes : bool, optional, defaults to False For `FastArray` columns which are ASCII string arrays (dtype.kind == 'S'), set this parameter to True to produce a fixed-length binary array instead of a variable-length string array. empty_strings_to_null : bool, optional, defaults To True For `FastArray` columns which are ASCII or Unicode string arrays, specify True for this parameter to convert empty strings to nulls in the output. riptable inconsistently recognizes the empty string as an 'invalid', so this parameter allows the caller to specify which interpretation they want. Returns ------- pyarrow.Table Notes ----- TODO: Maybe add a ``destroy`` bool parameter here to indicate the original arrays should be deleted immediately after being converted to a pyarrow array? 
We'd need to handle the case where the pyarrow array object was created in "zero-copy" style and wraps our original array (vs. a new array having been allocated via pyarrow); in that case, it won't be safe to delete the original array. Or, maybe we just call 'del' anyway to decrement the object's refcount so it can be cleaned up sooner (if possible) vs. waiting for this whole method to complete and the GC and riptable "Recycler" to run? """ import pyarrow as pa # Convert each of the columns to a pyarrow array. arrow_col_dict = {} for col_name in self.keys(): orig_col = self[col_name] try: # Convert the column/array using the FastArray.to_arrow() method (or the inherited overload # for derived classes). This allows additional options to be passed when converting, to give # callers more flexibility. arrow_col = orig_col.to_arrow( preserve_fixed_bytes=preserve_fixed_bytes, empty_strings_to_null=empty_strings_to_null ) except BaseException as exc: # Create another exception which wraps the given exception and provides # the column name in the error message to make it easier to diagnose issues. raise RuntimeError(f"Unable to convert column '{col_name}' to a pyarrow array.") from exc arrow_col_dict[col_name] = arrow_col # Create the pyarrow.Table from the dictionary of pyarrow arrays. return pa.table(arrow_col_dict) @staticmethod def _axis_key(axis): try: return {0: 0, 'c': 0, 'C': 0, 'col': 0, 'COL': 0, 'column': 0, 'COLUMN': 0, 1: 1, 'r': 1, 'R': 1, 'row': 1, 'ROW': 1, None: None, 'all': None, 'ALL': None}[axis] except KeyError: raise NotImplementedError(f'Not a valid value for axis: {axis!r}.') # ------------------------------------------------------------- def any(self, axis: Optional[int] = 0, as_dataset: bool = True): """ Returns truth 'any' value along `axis`. Behavior for ``axis=None`` differs from pandas! Parameters ---------- axis : int, optional, default axis=0 * axis=0 (dflt.) 
-> over columns (returns Struct (or Dataset) of bools) string synonyms: c, C, col, COL, column, COLUMN * axis=1 -> over rows (returns array of bools) string synonyms: r, R, row, ROW * axis=None -> over rows and columns (returns bool) string synonyms: all, ALL as_dataset : bool When ``axis=0``, return Dataset instead of Struct. Defaults to False. Returns ------- Struct (or Dataset) or list or bool """ def _col_any(_col): try: return bool(_col.any()) except TypeError: return any(_col) axis = self._axis_key(axis) cond_rtn_type = type(self) if as_dataset else Struct if axis == 0: return cond_rtn_type({_cn: _col_any(_val) for _cn, _val in self.items()}) if axis is None: return any(_col_any(_val) for _cn, _val in self.items()) if axis == 1: # for each col, !=0 to get back bool array. then inplace OR all those results, careful with string arrays temparray=zeros(len(self), dtype=bool) for arr in self.values(): if arr.dtype.num <= 13: # inplace OR for numerical data # for cats we will assume 0 is the invalid and !=0 check works # not sure about nan handling temparray += arr != 0 else: # care about string array? if arr.dtype.char in 'US': temparray += arr != '' else: # skip this datatype pass return temparray raise NotImplementedError('Dataset.any(axis=<0, 1, None>)') # ------------------------------------------------------------- def duplicated(self, subset: Optional[Union[str, List[str]]] = None, keep: Union[bool, str] = 'first'): """
distribution by calculating its _moments """ height = x = y = width_x = width_y = 0.0 total = data.sum() if total > 0.0: xx, yy = np.indices(data.shape) x = (xx * data).sum() / total y = (yy * data).sum() / total col = data[:, int(y)] width_x = np.sqrt(np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()) row = data[int(x), :] width_y = np.sqrt(np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()) height = data.max() # print('h: %5.1f'%height,'x: %5.1f'%x, 'y: %5.1f'%y, 'wx: %5.1f'%width_x, 'wy: %5.1f'%width_y) return height, x, y, width_x, width_y # ------------------------------------------------------------------- def fit_gaussian_2d(data): """Returns (height, x, y, width_x, width_y) the _gaussian parameters of a 2D distribution found by a fit (ravel makes 1-dim array)""" params = _moments(data) success = 0 if params[0] > 0: errorfunction = lambda p: np.ravel(_gaussian(*p)(*np.indices(data.shape)) - data) p, success = optimize.leastsq(errorfunction, params) return p, success # ------------------------------------------------------------------- def get_fits_keys(header, fits_dict, res_dict, keyprint=False): """ gets fits-header from image and appends or overwrites the current fits-header dictionary. 
It also converts the specially coded D_ keys to res_dict keys and attributes floating type values updates res_dict :param header: :param fits_dict: dictionary of fits-header values :param res_dict: dictionary of distortion parameters (result of m-calib) :param keyprint: :return: updated fits_dict """ for key in fits_dict.keys(): if key in header.keys(): fits_dict[key] = header[key] if keyprint: print(key, header[key]) for key in res_dict.keys(): fkey = 'D_' + key.upper() if fkey in header.keys(): res_dict[key] = np.float32(header[fkey]) fits_dict[fkey] = np.float32(header[fkey]) if keyprint: print(key, fits_dict[fkey]) return fits_dict # ------------------------------------------------------------------- def get_fits_image(fimage): """ reads fits image data and header fimage: filename with or without extension converts 32-bit floating values and 16-bit data to Python compatible values reads also color images and transposes matrix to correct order (normalizes images to +/- 1 range) returns: image as np array, header """ fimage = change_extension(fimage, '.fit') im, header = fits.getdata(fimage, header=True) if int(header['BITPIX']) == -32: im = np.array(im) / 32767 elif int(header['BITPIX']) == 16: im = np.array(im) else: print(f'unsupported data format BITPIX: {header["BITPIX"]}') exit() if len(im.shape) == 3: im = np.transpose(im, (1, 2, 0)) return im, header # ------------------------------------------------------------------- def show_fits_image(file, imscale, image_element, contr=1.0, show=True): """ not needed at present, left in place for further use loads fits-image, adjusts contrast and scale and displays in GUI as tmp.png replaced by draw_scaled_image :param file: fits-file with extension :param imscale: scale for displayed image :param image_element: where to display image in GUI :param contr: image contrast :param show: if True, image_element isupdated, otherwise only 'tmp.png' is created :return: """ imbw, header = get_fits_image(file) if 
len(imbw.shape) == 2: im = my_rescale(imbw, imscale, multichannel=False) else: im = my_rescale(imbw, imscale, multichannel=True) im = im / np.max(im) * 255 * contr im = np.clip(im, 0.0, 255) with warnings.catch_warnings(): warnings.simplefilter("ignore") ios.imsave('tmp.png', np.flipud(im).astype(np.uint8)) if show: image_element.update(filename='tmp.png') return # ------------------------------------------------------------------- def select_rectangle(infile, start, res_dict, fits_dict, wloc, outfil, maxim): """ displays new window with image infile + start + 'fit a rectangle around the selected line can be selected with dragging the mouse :param infile: filebase of image :param start: index of selected image :param res_dict: dictionary :param fits_dict: " :param wloc: location of displayed window for selection :param outfil: :param maxim: :return: Ok if rectangle selected, x0, y0: center coordinates of selected rectangle (int) dx, dy: half width and height of selected rectangle (int) """ im, header = get_fits_image(infile + str(start)) im = im / np.max(im) get_fits_keys(header, fits_dict, res_dict, keyprint=False) # #=================================================================== # new rect_plt # first get size of graph from tmp.png and size of image # graph coordinates are in image pixels! 
(imy, imx) = im.shape[:2] image_file = 'tmp.png' # scaled image imbw = np.flipud(ios.imread(image_file)) # get shape (canvasy, canvasx) = imbw.shape[:2] wlocw = (wloc[0] + 300, wloc[1] + 50) # check for old files delete_old_files(outfil, maxim, ext='.fit') image_elem_sel = [sg.Graph( canvas_size=(canvasx, canvasy), graph_bottom_left=(0, 0), # starts at top, set y-scale here graph_top_right=(imx, imy), # set x-scale here key='-GRAPH-', change_submits=True, # mouse click events drag_submits=True)] layout_select = [[sg.Text('Start File: ' + infile + str(start), size=(50, 1)), sg.Text(key='info', size=(40, 1)), sg.Ok(), sg.Cancel()], image_elem_sel] # --------------------------------------------------------------------------- winselect_active = True winselect = sg.Window(f'select zero order or spectral line', layout_select, finalize=True, location=wlocw, keep_on_top=True, no_titlebar=False, disable_close=False, disable_minimize=True) # get the graph element for ease of use later graph = winselect['-GRAPH-'] # type: sg.Graph graph.draw_image(image_file, location=(0, imy)) if image_file else None winselect.refresh() dragging = False start_point = end_point = prior_rect = None x0 = y0 = dx = dy = 0 while winselect_active: event, values = winselect.read() idg = graph.draw_rectangle((0, 0), (imx, imy), line_color='blue') if event == "-GRAPH-": # if there's a "Graph" event, then it's a mouse x, y = (values["-GRAPH-"]) if not dragging: start_point = (x, y) dragging = True else: end_point = (x, y) if prior_rect: graph.delete_figure(prior_rect) if None not in (start_point, end_point): prior_rect = graph.draw_rectangle(start_point, end_point, line_color='red') elif event is not None and event.endswith('+UP'): # The drawing has ended because mouse up xy0 = [int(0.5 * (start_point[0] + end_point[0])), int(0.5 * (start_point[1] + end_point[1]))] size = (abs(start_point[0] - end_point[0]), abs(start_point[1] - end_point[1])) info = winselect["info"] info.update(value=f"grabbed 
rectangle at {xy0} with size {size}") start_point, end_point = None, None # enable grabbing a new rect dragging = False if min(size[0], size[1]) > 1: # rectangle info.update(value=f"rectangle at {xy0} with size {size}") x0 = xy0[0] y0 = xy0[1] dx = int((size[0] + 1) / 2) dy = int((size[1] + 1) / 2) elif event in ('Ok', 'Cancel', None): graph.delete_figure(idg) winselect_active = False winselect.close() return event, x0, y0, dx, dy # ------------------------------------------------------------------- def add_rows_apply_tilt_slant(outfile, par_dict, res_dict, fits_dict, opt_dict, contr, wlocw, restext, regtext, window): """ displays new window with image outfile.fit for selection of rows to be added allows adjustment of tilt and slant after selection of rows if Ok, images outfile + ['st.fit', 'st,png'] are saved :param outfile: :param par_dict: :param res_dict: :param fits_dict: :param opt_dict: # :param imscale: :param contr: :param wlocw: :param restext: :param regtext: :param window: :return: Ok if selection is accepted tilt, slant: selected values for image outfile + ['st.fit', 'st,png'] """ def _slant_tilt_mapping(xy, center, dx, dy): """ Calculate shifted coordinates: xs = x' - (y'-y0)*dx (slant) ys = y' - (x'-x0)*dy (tilt) (Pixel value at x',y': I'(x',y') = I(x,y) in the original image) """ x, y = xy.T x0, y0 = center xy[..., 0] = x - (y - y0) * dx xy[..., 1] = y - (x - x0) * dy return xy tilt = 0.0 slant = 0.0 ymin = 0 ymax = 0 idg = None im, header = get_fits_image(outfile) if 'D_X00' in header.keys(): dist = True else: dist = False if debug: print(np.max(im)) im = im / np.max(im) imtilt = im_ori = im fits_dict = get_fits_keys(header, fits_dict, res_dict, keyprint=False) write_fits_image(imtilt, outfile + 'st.fit', fits_dict, dist=dist) # used for calibration, if no tilt, slant # new rect_plt (imy, imx) = im.shape[:2] imbw = np.flipud(ios.imread('tmp.png')) # get shape (canvasy, canvasx) = imbw.shape[:2] # wlocw = (wloc[0] + 300, wloc[1] + 100) image_file = 
'tmp.png' # ------------------------------------------------------------------- par_dict['i_imx'] = imx par_dict['i_imy'] = imy image_elem_sel = [sg.Graph( canvas_size=(canvasx, canvasy), graph_bottom_left=(0, 0), graph_top_right=(imx, imy), # set x- and y-scale here key='-GRAPH-', change_submits=True, # mouse click events drag_submits=True)] layout_select = [[sg.Text('Start File: ' + outfile, size=(40, 1)), sg.Checkbox('correct background', key='-BACK-'), sg.Text('Tilt'), sg.InputText(tilt, size=(6, 1), key='-TILT-'), sg.Text('Slant'), sg.InputText(slant, size=(6, 1), key='-SLANT-'), sg.Button('Apply', key='-APPLY_TS-', bind_return_key=True), sg.Ok(), sg.Cancel()], image_elem_sel, [sg.Text(key='info', size=(60, 1))]] # --------------------------------------------------------------------------- winselect_active = True winselect = sg.Window(f'select rows for 1-D sum spectrum, apply tilt and slant', layout_select, finalize=True, location=wlocw, keep_on_top=True, no_titlebar=False, disable_close=False, disable_minimize=True) # get the graph element for ease of use later graph = winselect['-GRAPH-'] # type: sg.Graph graph.draw_image(image_file, location=(0, imy)) if image_file else None dragging = False start_point = end_point = prior_rect = upper_back = lower_back = None while winselect_active: event, values = winselect.read() graph.draw_rectangle((0, 0), (imx, imy), line_color='blue') if event == "-GRAPH-": # if there's a "Graph" event, then it's a mouse background = values['-BACK-'] x, y = (values["-GRAPH-"]) if not dragging: start_point = (x, y) dragging =
= [" interiming ", " foreseeing ", " tarrying ", " holding onto thine hat "] move = [" persuade ", " inspire ", " excite "] moving = [" persuading ", " inspiring ", " exciting "] win = [" triumph "] won = [" achieved "] reward = [" endowment", " conferment", " guerdon", " spoil"] almost = [" nigh ", " well-nigh "] around = [" encompassing "] clothes = [" raiments "] getting = [" reaping ", " appropriating ", " assimilating "] infinite = [" interminably "] choose = [" predestine ", " commit oneself "] choice = [" volition " ] can = [" may ", " beest capable of ", " commit "] we = [" our own selves "] tell = [" acquaint ", " apprise " ] meet = [" rendezvous ", " foregather ", " convene " ] again = [" reiteratively "] seems = [" occurs "] too = [" exorbitantly "] late = [" unpunctual "] where = [" unto what venue "] has = [" falls upon ", " includes ", " embraces ", " suffers "] rich = [" opulent "] happened = [" befallen "] place = [" venue "] get = [ " reap ", " appropriate ", " assimilate " ] got = [ " reaped ", " appropriated ", " assimilated " ] but = [" nevertheless "] quick = [" expeditious ", " presto ", " in short order "] quicker = [" expeditiously ", " hastily "] maybe = [" perchance ", " conceivably ", " weather permitting "] exactly = [" precisely "] convert = [" appropriate "] explain = [" convey ", " elucidate "] explanation = [" elucidation "] suppose = [" presume "] supposed = [" presumed "] getting = [" reaping ", " appropriating ", " assimilating "] better = [" ameliorate ", " sophisticated ", " exceptional "] become = [" metamorphose ", " harmonize ", " embellish "] becoming = [" metamorphosing to ", " harmonizing to ", " embellishing to "] became = [" metamorphosed to ", " harmonized to ", " embellished to "] before = [" antecedent to ", " preceding "] after = [" subsequently ", " ensuing "] least = [" feeblest "] most = [" utmost ", " highest degree "] large = [" exorbitant ", " generous ", " considerable "] small = [" inadequate ", " 
inconsequential ", " diminutive ", " humble "] upgrade = [" enrich ", " ameliorate "] upgrades = [" enrichments ", " ameliorates " ] upgraded = [" enriched ", " ameliorated "] add = [" append "] remove = [" expel "] bet = [" parlay "] accept = [" acquiesce ", " sympathize "] accepted = [" acquiesced ", " sympathized "] see = [" discern "] event = [" conjuncture ", " ceremony "] events = [" conjunctures ", " ceremonies "] bad = [" inadequate "] rude = [" peremptory "] polite = [" affable "] kin = [" lineage ", " consanguinity ", " kindred "] story = [" anecdote ", " spiel ", " apologue ", " allegory "] wealth = [" luxuriance ", " affluence "] amount = [" magnitude ", " expanse ", " passel "] joke = [" quip "] joking = [" facetious "] jokingly = [" facetiously "] quit = [" relinquish "] quiting = [" relinquishing "] begin = [" commence "] beginning = [" commencement ", " genesis "] say = [" express "] saying = [" expressing "] says = [" expresses "] said = [" aforementioned "] tell = [" apprise "] told = [" apprised "] guess = [" conjecture "] guessing = [" conjecturing "] here = [" hither "] lack = [" exiguity "] lacking = [" exiguous "] fair = [" honorable "] honest = [" virtuous "] even = [" conceivably "] done = [" concluded "] things = [" occurrences "] ask = [" inquire "] asking = [" inquiring "] asked = [" inquired "] full = [" abounding "] crystal = [" lucent "] clean = [" limpid "] check = [" audit "] out = [" superficially "] worker = [" peasant "] workers = [" peasants "] many = [" abounding "] luck = [" fortune "] over = [" aloft "] dare = [" strife ", " contention "] update = [" refurbish "] updates = [" renovations "] phrase = [" expression "] phrases = [" verbiage "] none = [" nil "] allw = [" ensemble "] morning = [" dawn ", " sunrise "] noon = [" meridian ", " noontide "] night = [" nightfall ", " dusk ", " twilight "] effort = [" resolution "] currently = [" in aforementioned juncture "] moment = [" trice "] suggest = [" propose "] suggested = [ " 
proposed "] suggestion = [" proposition "] parent = [" antecedent "] father = [" begetter "] mother = [" forebearer "] child = [" bairn "] double = [" duple "] triple = [" trine ", " treble "] camera = [" daguerreotype "] cup = [" chalice "] nextw = [" subsequent "] bar = [" lounge ", " bistro "] walk = [" wander "] walked = [" wandered "] walking = [" wandering "] value = [" desirability "] reason = [" vindication "] reasons = [" vindications "] because = [" by virtue of "] since = [" therefore ", " hence "] these = [" the indicated ", " the particular "] give = [" bestow "] attempt = [" endeavor "] attempted = [" endeavored "] attempting = [" endeavoring "] tryw = [" pursue "] tried = [" pursued "] trying = [" pursueing "] have = [" fall upon " , " include ", " embrace " , " suffer "] having = [" falling upon ", " including ", " embracing ", " suffering "] at = [" upon "] problem = [" dilemma "] problems = [" dilemmas "] whole = [" plenary "] entire = [" inclusive "] face = [" visage "] #functions def change(text, words, translate): for x in range(0, len(translate)): translate = translate.replace(text ,words[randint(0 , len(words)-1)]) return translate #Main while(True): translation = input("< ") translation = " "+translation+" " #fix for x in range(0, len(translation)): translation = translation.replace("'ve " ," have ") for x in range(0, len(translation)): translation = translation.replace("n't " ," nay ") for x in range(0, len(translation)): translation = translation.replace("'re " ," art ") for x in range(0, len(translation)): translation = translation.replace("'m " ," am ") for x in range(0, len(translation)): translation = translation.replace(" ca " ," can ") for x in range(0, len(translation)): translation = translation.replace(" wo " ," shalt ") #translate translation = change(" mean ", mean, translation) translation = change(" and ", andw, translation) translation = change(" also ", also, translation) translation = change(" some ", some, translation) 
translation = change(" any ", some, translation) translation = change(" one ", one, translation) translation = change(" every ", every, translation) translation = change(" not ", notw, translation) translation = change(" yes ", yes, translation) translation = change(" new ", new, translation) translation = change(" old ", old, translation) translation = change(" skill ", skill, translation) translation = change(" skilled ", skilled, translation) translation = change(" had ", had, translation) translation = change(" correct ", correct, translation) translation = change(" true ", correct, translation) translation = change(" today ", today, translation) translation = change(" know ", know, translation) translation = change(" knowing ", knowing, translation) translation = change(" work ", work, translation) translation = change(" worked ", worked, translation) translation = change(" working ", working, translation) translation = change(" try ", tryw, translation) translation = change(" tried ", tried, translation) translation = change(" trying ", trying, translation) translation = change(" attempt ", attempt, translation) translation = change(" attempted ", attempted, translation) translation = change(" attempting ", attempting, translation) translation = change(" want ", want, translation) translation = change(" would ", want, translation) translation = change(" like2 ", enjoy, translation) translation = change(" enjoy ", enjoy, translation) translation = change(" enjoyable ", enjoyable, translation) translation = change(" enjoyment ", enjoyment, translation) translation = change(" need ", need, translation) translation = change(" needs ", needs, translation) translation = change(" needing ", needing, translation) translation = change(" no ", no, translation) translation = change(" needing ", needing, translation) translation = change(" so ", so, translation) translation = change(" how ", how, translation) translation = change(" who ", who, translation) translation = 
change(" the ", the, translation) translation = change(" they ", them, translation) translation = change(" them ", them, translation) translation = change(" you ", you, translation) translation = change(" your ", your, translation) translation = change(" their ", your, translation) translation = change(" yours ", yours, translation) translation = change(" doing ", doing, translation) translation = change(" hey
<filename>abacus_extension.py #!/usr/bin/env python # -*- coding: utf-8 -*- """ This module defines a class `PartitionExt` which extends the `Partition` class in SageMath with methods related to the computation of the generalized core and quotient decomposition as described in [Pearce, 2020]. See the docstring of the `PartitionExt` class for a description of the available methods. This module further includes the following functions related to constructing partitions from generalised core and quotient data * from_G_core_and_quotient(core, quotient, r, b=-1) - Create partition from G-core-quotient decomposition w.r.t. $(r,b)$-action * from_G_charges_and_quotient(charges, quotient, r, b=-1) - Create partition from $(r,b)$-charge coordinates and quotient * from_G_abacus(abacus, r=None, b=-1) - Create partition from a representation of its $(r,b)$-abacus. """ # Standard library imports from collections import deque # deque is list-like container with faster popleft # SageMath imports import sage.all # Required to run this module from a Python interpreter/kernel (not required for Sage kernel) from sage.combinat.partition import Partition, Partitions from sage.combinat.partition_tuple import PartitionTuple # Used for compatibility with `Partition.quotient` method from sage.arith.all import gcd # Local packages __author__ = '<NAME>' __copyright__ = 'Copyright 2020, PyParti (Suanpan Project)' __credits__ = ['<NAME>'] __license__ = 'MIT' __version__ = '1.0.0' __maintainer__ = '<NAME>' __email__ = '<EMAIL>' __status__ = 'Development' class PartitionExt(Partition): r"""Extends the `Partition` class in SageMath with the following methods: * G_colour_tableau(self, r, b=-1) - Returns tableau of `self` with cells coloured by $(i,j) \mapsto i + bj \ (\mathrm{mod}\ r)$. * G_colour_count(self, r, b=-1) - Counts the number of cells in `self` of each colour under the $(r,b)$-colouring. 
* G_weighted_hook_lengths(self, r, b=-1) - Returns tableau of `self` with cells numbered according to the formula $$\ell(\square) - b(a(\square) + 1) \ (\mathrm{mod}\ r).$$ * is_G_core(self, r, b=-1) - Checks whether `self` is a G-core with respect to the $(r,b)$-action * G_core(self, r, b=-1) - Returns the G-core partition of `self` with respect to the $(r,b)$-action * G_quotient(self, r, b=-1) - Returns the G-quotient of `self` with respect to the $(r,b)$-action, an $r$-tuple of partitions * G_abacus(self, r, b=-1) - Returns an $r$-tuple of path sequences {1:N, 0:E} corresponding to `self` and the $(r,b)$-action * G_charges(self, r, b=-1) - Returns the charge coordinates of `self` with respect to the $(r,b)$-action, an $r$-tuple of integers In the default case that only one argument `r` is passed, the action is of type $(r,-1) = (r,r-1)$ which is special linear and yields the same result as the classical `core(r)` and `quotient(r)` methods. """ def __init__(self, mu): """ Initialize `self`. """ assert isinstance(mu, Partition) self._list = mu._list __repr__ = Partition._repr_list def G_colour_tableau(self, r, b=-1): r""" Returns tableau of `self` with cells coloured according to the $(r,b)$-action on monomials over $\mathbb{C}[x,y]$. Specifically, a cell $(i,j)$ is mapped to colour value $i + bj \ (\mathrm{mod}\ r)$. """ return [[(i + b * j) % r for j in range(self[i])] for i in range(len(self))] def G_colour_count(self, r, b=-1): r""" Counts the number of cells in `self` of each colour under the $(r,b)$-colouring. """ counts = [0 for _ in range(r)] for row in self.G_colour_tableau(r,b): for cell_colour in row: counts[cell_colour] += 1 assert sum(counts) == self.size() return counts def G_weighted_hook_lengths(self, r, b=-1): r""" Returns tableau of `self` with cells numbered according to the formula $$\ell(\square) - b(a(\square) + 1) \ (\mathrm{mod}\ r).$$ In particular, a partition $p$ is called an $(r,b)$-core if none of the cells in this tableau are 0. 
""" return [[cell % r for cell in row] for row in self.upper_hook_lengths(-b)] def is_G_core(self, r, b=-1): r"""Checks whether `self` is a G-core with respect to the $(r,b)$-action. Returns: Bool A partition is said to be an $(r,b)$-core if it has no cells satisfying the congruence equation $$\ell(\square) - b(a(\square) + 1) \equiv 0 \ (\mathrm{mod}\ r).$$ Equivalently, a partition is said to be an $(r,b)$-core if it is its own $(r,b)$-core (where the latter is defined as in `G_core()`). """ return not any(any((cell % r) == 0 for cell in row) for row in self.upper_hook_lengths(-b)) def G_abacus(self, r, b=-1, method='fast'): r""" Converts a partition to a finite representation of its $G$-abacus, where the $G$-action is of type $(r,b)$. First, a sequence encoding the border path of the partition is calculated, with convention {1:N, 0:E} in English notation Next, we separate path segments in the sequence onto $r$ different abacus wires according to their location in the lattice $\mathbb{Z}^{2}$ and the lattice colouring defined by the $(r,b)$-action. Returns: Length $r$ list of path sequences (themselves lists of '0's and '1's) Note that every full path sequence starts with infinitely many '1's and ends with infinitely many '0's. 
""" if method == 'fast': # Reads abacus from minimal zero-one sequence, swapping '0' and '1' to ensure {1:N, 0:E} convention seq = invert_zero_one(self.zero_one_sequence()) elif method == 'slow': # Dyck word is always longer than minimal zero-one sequence, so will take longer to read seq = self.to_dyck_word() abacus = [deque() for _ in range(r)] wire_num = sum(seq) % r # counts number of '1's modulo r to find the starting wire index for code in seq: abacus[wire_num].append(code) # The next wire to read from depends on the value of the current symbol # Add b to the wire index if '0' was read, else subtract 1 if '1' was read, then reduce modulo r wire_num = (wire_num + b * (1 - code) - code) % r return abacus def G_core(self, r, b=-1): r""" Calculates the G-core partition of `self` with respect to the $(r,b)$-action, First, the G-charge coordinates of `self` are computed via the G-abacus. These are preserved by valid abacus moves. Next, we implicitly slide beads on the G-abacus to remove any quotient component whilst preserving charges. Finally, the modified abacus is converted back to a partition. Returns: an instance of the PartitionExt class """ return self.from_G_charges_and_quotient(self.G_charges(r,b), quotient=None, r=r, b=b) def G_quotient(self, r, b=-1, label_swap_xy=False): r""" Calculates the G-quotient of `self` with respect to the $(r,b)$-action, First, the G-abacus of `self` is computed. Second, the path sequence of each abacus wire is converted back to a partition. Note that the abacus uses the convention {1:N, 0:E} in English notation for partition border paths Due to differences in conventions effectively swapping xy-coordinates for cell colouring (vs. content), the order of partitions in the $(r,r-1)$-quotient ($b=-1$ special case) differs from the classical $r$-quotient by a reflection of indices. This can be accounted for by setting the optional `label_swap_xy` keyword argument to `True`. 
A cell $(i,j)$ is mapped as `Partition.content` -> $j - i$, whilst $(r,-1)$-colour -> $i - j (mod r)$ (as case of $(r,b)$-colour -> $i + bj (mod r)$). Returns: an $r$-tuple of partitions """ # Sagemath uses the convention {1:E, 0:N} when reading partition from a path sequence, so we have to swap '0's and '1's p_list = [Partition(zero_one=invert_zero_one(wire)) for wire in self.G_abacus(r,b)] # Reflect the order of partitions in the $b=-1$ case `G_quotient` to account for differences in conventions for cell colouring # for compatibility with `Partition.quotient`. if label_swap_xy: p_list = [p_list[0]] + p_list[:0:-1] # Cast the list of partitions in the quotient as a `PartitionTuple` for compatibility with the `Partition.quotient` method return PartitionTuple(p_list) def G_charges(self, r, b=-1): r""" Calculates the charge coordinates of `self` with respect to the $(r,b)$-action, returning an $r$-tuple of integers. The charge on each wire of the $(r,b)$-abacus of `self` describes the excess or deficit of electrons ('1' symbols) relative to a fixed ground state - the $(r,b)$-abacus of the empty partition (the vacuum). By construction, global charge (i.e. the sum of charge coordinates) is zero. """ abacus = self.G_abacus(r, b) total_north_steps = sum(sum(wire) for wire in abacus) # For reference to a fixed ground state, we calculate the number of north steps ('1' symbols) on each wire that # we would expect if an abacus with the same number of total north steps was generated from the empty partition expected = [(total_north_steps //
check_obj.apply(check_fn, axis=1) if isinstance(check_obj, pd.DataFrame) else check_obj.map(check_fn) if isinstance(check_obj, pd.Series) else check_fn(check_obj) ) else: # vectorized check function case check_output = check_fn(check_obj) # failure cases only apply when the check function returns a boolean # series that matches the shape and index of the check_obj if ( isinstance(check_obj, dict) or isinstance(check_output, bool) or not isinstance(check_output, (pd.Series, pd.DataFrame)) or check_obj.shape[0] != check_output.shape[0] or (check_obj.index != check_output.index).all() ): failure_cases = None elif isinstance(check_output, pd.Series): if self.ignore_na: isna = ( check_obj.isna().any(axis="columns") if isinstance(check_obj, pd.DataFrame) else check_obj.isna() ) check_output = check_output | isna failure_cases = check_obj[~check_output] elif isinstance(check_output, pd.DataFrame): # check results consisting of a boolean dataframe should be # reported at the most granular level. 
check_output = check_output.unstack() if self.ignore_na: check_output = check_output | df_or_series.unstack().isna() failure_cases = ( check_obj.unstack()[~check_output] .rename("failure_case") .rename_axis(["column", "index"]) .reset_index() ) else: raise TypeError( f"output type of check_fn not recognized: {type(check_output)}" ) if failure_cases is not None and self.n_failure_cases is not None: failure_cases = failure_cases.drop_duplicates().iloc[ : self.n_failure_cases ] check_passed = ( check_output.all() if isinstance(check_output, pd.Series) else check_output.all(axis=None) if isinstance(check_output, pd.DataFrame) else check_output ) return CheckResult( check_output, check_passed, check_obj, failure_cases ) def __eq__(self, other): are_check_fn_objects_equal = ( self.__dict__["_check_fn"].__code__.co_code == other.__dict__["_check_fn"].__code__.co_code ) try: are_strategy_fn_objects_equal = all( getattr(self.__dict__.get("strategy"), attr) == getattr(other.__dict__.get("strategy"), attr) for attr in ["func", "args", "keywords"] ) except AttributeError: are_strategy_fn_objects_equal = True are_all_other_check_attributes_equal = { i: self.__dict__[i] for i in self.__dict__ if i not in ["_check_fn", "strategy"] } == { i: other.__dict__[i] for i in other.__dict__ if i not in ["_check_fn", "strategy"] } return ( are_check_fn_objects_equal and are_strategy_fn_objects_equal and are_all_other_check_attributes_equal ) def __hash__(self): return hash(self.__dict__["_check_fn"].__code__.co_code) def __repr__(self): return ( f"<Check {self.name}: {self.error}>" if self.error is not None else f"<Check {self.name}>" ) class _CheckMeta(type): # pragma: no cover """Check metaclass.""" def __getattr__(cls, name: str) -> Any: """Prevent attribute errors for registered checks.""" attr = cls.__dict__.get(name) if attr is None: raise AttributeError(f"'{cls}' object has no attribute '{name}'") return attr class Check(_CheckBase, metaclass=_CheckMeta): """Check a pandas Series or 
DataFrame for certain properties.""" REGISTERED_CUSTOM_CHECKS: Dict[str, Callable] = {} # noqa @classmethod @st.register_check_strategy(st.eq_strategy) @register_check_statistics(["value"]) def equal_to(cls, value, **kwargs) -> "Check": """Ensure all elements of a series equal a certain value. *New in version 0.4.5* Alias: ``eq`` :param value: All elements of a given :class:`pandas.Series` must have this value :param kwargs: key-word arguments passed into the `Check` initializer. :returns: :class:`Check` object """ def _equal(series: pd.Series) -> pd.Series: """Comparison function for check""" return series == value return cls( _equal, name=cls.equal_to.__name__, error=f"equal_to({value})", **kwargs, ) eq = equal_to @classmethod @st.register_check_strategy(st.ne_strategy) @register_check_statistics(["value"]) def not_equal_to(cls, value, **kwargs) -> "Check": """Ensure no elements of a series equals a certain value. *New in version 0.4.5* Alias: ``ne`` :param value: This value must not occur in the checked :class:`pandas.Series`. :param kwargs: key-word arguments passed into the `Check` initializer. :returns: :class:`Check` object """ def _not_equal(series: pd.Series) -> pd.Series: """Comparison function for check""" return series != value return cls( _not_equal, name=cls.not_equal_to.__name__, error=f"not_equal_to({value})", **kwargs, ) ne = not_equal_to @classmethod @st.register_check_strategy(st.gt_strategy) @register_check_statistics(["min_value"]) def greater_than(cls, min_value, **kwargs) -> "Check": """Ensure values of a series are strictly greater than a minimum value. *New in version 0.4.5* Alias: ``gt`` :param min_value: Lower bound to be exceeded. Must be a type comparable to the dtype of the :class:`pandas.Series` to be validated (e.g. a numerical type for float or int and a datetime for datetime). :param kwargs: key-word arguments passed into the `Check` initializer. 
:returns: :class:`Check` object """ if min_value is None: raise ValueError("min_value must not be None") def _greater_than(series: pd.Series) -> pd.Series: """Comparison function for check""" return series > min_value return cls( _greater_than, name=cls.greater_than.__name__, error=f"greater_than({min_value})", **kwargs, ) gt = greater_than @classmethod @st.register_check_strategy(st.ge_strategy) @register_check_statistics(["min_value"]) def greater_than_or_equal_to(cls, min_value, **kwargs) -> "Check": """Ensure all values are greater or equal a certain value. *New in version 0.4.5* Alias: ``ge`` :param min_value: Allowed minimum value for values of a series. Must be a type comparable to the dtype of the :class:`pandas.Series` to be validated. :param kwargs: key-word arguments passed into the `Check` initializer. :returns: :class:`Check` object """ if min_value is None: raise ValueError("min_value must not be None") def _greater_or_equal(series: pd.Series) -> pd.Series: """Comparison function for check""" return series >= min_value return cls( _greater_or_equal, name=cls.greater_than_or_equal_to.__name__, error=f"greater_than_or_equal_to({min_value})", **kwargs, ) ge = greater_than_or_equal_to @classmethod @st.register_check_strategy(st.lt_strategy) @register_check_statistics(["max_value"]) def less_than(cls, max_value, **kwargs) -> "Check": """Ensure values of a series are strictly below a maximum value. *New in version 0.4.5* Alias: ``lt`` :param max_value: All elements of a series must be strictly smaller than this. Must be a type comparable to the dtype of the :class:`pandas.Series` to be validated. :param kwargs: key-word arguments passed into the `Check` initializer. 
:returns: :class:`Check` object """ if max_value is None: raise ValueError("max_value must not be None") def _less_than(series: pd.Series) -> pd.Series: """Comparison function for check""" return series < max_value return cls( _less_than, name=cls.less_than.__name__, error=f"less_than({max_value})", **kwargs, ) lt = less_than @classmethod @st.register_check_strategy(st.le_strategy) @register_check_statistics(["max_value"]) def less_than_or_equal_to(cls, max_value, **kwargs) -> "Check": """Ensure values are less than or equal to a maximum value. *New in version 0.4.5* Alias: ``le`` :param max_value: Upper bound not to be exceeded. Must be a type comparable to the dtype of the :class:`pandas.Series` to be validated. :param kwargs: key-word arguments passed into the `Check` initializer. :returns: :class:`Check` object """ if max_value is None: raise ValueError("max_value must not be None") def _less_or_equal(series: pd.Series) -> pd.Series: """Comparison function for check""" return series <= max_value return cls( _less_or_equal, name=cls.less_than_or_equal_to.__name__, error=f"less_than_or_equal_to({max_value})", **kwargs, ) le = less_than_or_equal_to @classmethod @st.register_check_strategy(st.in_range_strategy) @register_check_statistics( ["min_value", "max_value", "include_min", "include_max"] ) def in_range( cls, min_value, max_value, include_min=True, include_max=True, **kwargs ) -> "Check": """Ensure all values of a series are within an interval. :param min_value: Left / lower endpoint of the interval. :param max_value: Right / upper endpoint of the interval. Must not be smaller than min_value. :param include_min: Defines whether min_value is also an allowed value (the default) or whether all values must be strictly greater than min_value. :param include_max: Defines whether min_value is also an allowed value (the default) or whether all values must be strictly smaller than max_value. :param kwargs: key-word arguments passed into the `Check` initializer. 
Both endpoints must be a type comparable to the dtype of the :class:`pandas.Series` to be validated. :returns: :class:`Check` object """ if min_value is None: raise ValueError("min_value must not be None") if max_value is None: raise ValueError("max_value must not be None") if max_value < min_value or ( min_value == max_value and (not include_min or not include_max) ): raise ValueError( "The combination of min_value = %s and max_value = %s " "defines an empty interval!" % (min_value, max_value) ) # Using functions from operator module to keep conditions out of the # closure left_op = operator.le if include_min else operator.lt right_op = operator.ge if include_max else operator.gt def _in_range(series: pd.Series) -> pd.Series: """Comparison function for check""" return left_op(min_value, series) & right_op(max_value, series) return cls( _in_range, name=cls.in_range.__name__, error=f"in_range({min_value}, {max_value})", **kwargs, ) @classmethod @st.register_check_strategy(st.isin_strategy) @register_check_statistics(["allowed_values"]) def isin(cls, allowed_values: Iterable, **kwargs) -> "Check": """Ensure only allowed values occur within a series. :param allowed_values: The set of allowed values. May be any iterable. :param kwargs: key-word arguments passed into the `Check` initializer. :returns: :class:`Check` object .. note:: It is checked whether all elements of a :class:`pandas.Series` are part of the set of elements of allowed values. If allowed values is a string, the set of elements consists of all distinct characters of the string. Thus only single characters which occur in allowed_values at least once can meet this condition. If you want to check for substrings use :func:`Check.str_is_substring`. """ # Turn allowed_values into a set. Not only for performance but also # avoid issues with a mutable argument passed by reference which may be # changed from outside. 
try: allowed_values = frozenset(allowed_values) except TypeError as exc: raise ValueError( f"Argument allowed_values must be iterable. Got {allowed_values}" ) from exc def _isin(series: pd.Series) -> pd.Series: """Comparison function for check""" return series.isin(allowed_values) return cls( _isin, name=cls.isin.__name__, error=f"isin({set(allowed_values)})", **kwargs, ) @classmethod @st.register_check_strategy(st.notin_strategy) @register_check_statistics(["forbidden_values"]) def notin(cls, forbidden_values: Iterable, **kwargs) -> "Check": """Ensure
outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='nrProc'): pass def exportChildren(self, outfile, level, namespace_='', name_='nrProc', fromsubclass_=False, pretty_print=True): pass def build(self, node): already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class nrProc class fimAfastamento(GeneratedsSuper): """Informações do Término do Afastamento""" subclass = None superclass = None def __init__(self, dtTermAfast=None): self.original_tagname_ = None if isinstance(dtTermAfast, BaseStrType_): initvalue_ = datetime_.datetime.strptime(dtTermAfast, '%Y-%m-%d').date() else: initvalue_ = dtTermAfast self.dtTermAfast = initvalue_ def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, fimAfastamento) if subclass is not None: return subclass(*args_, **kwargs_) if fimAfastamento.subclass: return fimAfastamento.subclass(*args_, **kwargs_) else: return fimAfastamento(*args_, **kwargs_) factory = staticmethod(factory) def get_dtTermAfast(self): return self.dtTermAfast def set_dtTermAfast(self, dtTermAfast): self.dtTermAfast = dtTermAfast def hasContent_(self): if ( self.dtTermAfast is not None ): return True else: return False def export(self, outfile, level, namespace_='', name_='fimAfastamento', namespacedef_='', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('fimAfastamento') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None: name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) 
# --- generateDS-produced XML binding classes (eSocial schema). ---
# NOTE(review): the statements immediately below are the tail of
# fimAfastamento.export(); the enclosing class header precedes this excerpt.
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='fimAfastamento')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='fimAfastamento', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='fimAfastamento'):
        # Element carries no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='fimAfastamento', fromsubclass_=False, pretty_print=True):
        # Writes the single optional <dtTermAfast> child, formatted as a date.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.dtTermAfast is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sdtTermAfast>%s</%sdtTermAfast>%s' % (namespace_, self.gds_format_date(self.dtTermAfast, input_name='dtTermAfast'), namespace_, eol_))
    def build(self, node):
        # Populate this object from an ElementTree node; returns self.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'dtTermAfast':
            sval_ = child_.text
            dval_ = self.gds_parse_date(sval_)
            self.dtTermAfast = dval_
# end class fimAfastamento


class dtTermAfast(GeneratedsSuper):
    # Binding for an element with no attributes and no children.
    subclass = None
    superclass = None
    def __init__(self):
        self.original_tagname_ = None
    def factory(*args_, **kwargs_):
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, dtTermAfast)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if dtTermAfast.subclass:
            return dtTermAfast.subclass(*args_, **kwargs_)
        else:
            return dtTermAfast(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        # Generated empty membership test: `if ():` is always False.
        if (

        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='dtTermAfast', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('dtTermAfast')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='dtTermAfast')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='dtTermAfast', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dtTermAfast'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='dtTermAfast', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class dtTermAfast


class TIdeEveTrab(GeneratedsSuper):
    """Identification of the event (original: 'Identificação do evento')"""
    subclass = None
    superclass = None
    def __init__(self, indRetif=None, nrRecibo=None, tpAmb=None, procEmi=None, verProc=None):
        self.original_tagname_ = None
        self.indRetif = indRetif
        self.nrRecibo = nrRecibo
        self.tpAmb = tpAmb
        self.procEmi = procEmi
        self.verProc = verProc
    def factory(*args_, **kwargs_):
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TIdeEveTrab)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TIdeEveTrab.subclass:
            return TIdeEveTrab.subclass(*args_, **kwargs_)
        else:
            return TIdeEveTrab(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessor pairs (generateDS convention; kept for API compatibility).
    def get_indRetif(self): return self.indRetif
    def set_indRetif(self, indRetif): self.indRetif = indRetif
    def get_nrRecibo(self): return self.nrRecibo
    def set_nrRecibo(self, nrRecibo): self.nrRecibo = nrRecibo
    def get_tpAmb(self): return self.tpAmb
    def set_tpAmb(self, tpAmb): self.tpAmb = tpAmb
    def get_procEmi(self): return self.procEmi
    def set_procEmi(self, procEmi): self.procEmi = procEmi
    def get_verProc(self): return self.verProc
    def set_verProc(self, verProc): self.verProc = verProc
    def hasContent_(self):
        if (
            self.indRetif is not None or
            self.nrRecibo is not None or
            self.tpAmb is not None or
            self.procEmi is not None or
            self.verProc is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='TIdeEveTrab', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('TIdeEveTrab')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='TIdeEveTrab')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='TIdeEveTrab', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='TIdeEveTrab'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='TIdeEveTrab', fromsubclass_=False, pretty_print=True):
        # Children in schema order: indRetif, nrRecibo, tpAmb, procEmi, verProc.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.indRetif is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sindRetif>%s</%sindRetif>%s' % (namespace_, self.gds_format_integer(self.indRetif, input_name='indRetif'), namespace_, eol_))
        if self.nrRecibo is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%snrRecibo>%s</%snrRecibo>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.nrRecibo), input_name='nrRecibo')), namespace_, eol_))
        if self.tpAmb is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%stpAmb>%s</%stpAmb>%s' % (namespace_, self.gds_format_integer(self.tpAmb, input_name='tpAmb'), namespace_, eol_))
        if self.procEmi is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sprocEmi>%s</%sprocEmi>%s' % (namespace_, self.gds_format_integer(self.procEmi, input_name='procEmi'), namespace_, eol_))
        if self.verProc is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sverProc>%s</%sverProc>%s' % (namespace_, self.gds_encode(self.gds_format_string(quote_xml(self.verProc), input_name='verProc')), namespace_, eol_))
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'indRetif':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'indRetif')
            self.indRetif = ival_
        elif nodeName_ == 'nrRecibo':
            nrRecibo_ = child_.text
            nrRecibo_ = self.gds_validate_string(nrRecibo_, node, 'nrRecibo')
            self.nrRecibo = nrRecibo_
        elif nodeName_ == 'tpAmb':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'tpAmb')
            self.tpAmb = ival_
        elif nodeName_ == 'procEmi':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'procEmi')
            self.procEmi = ival_
        elif nodeName_ == 'verProc':
            verProc_ = child_.text
            verProc_ = self.gds_validate_string(verProc_, node, 'verProc')
            self.verProc = verProc_
# end class TIdeEveTrab


class indRetif(GeneratedsSuper):
    # Binding for an element with no attributes and no children.
    subclass = None
    superclass = None
    def __init__(self):
        self.original_tagname_ = None
    def factory(*args_, **kwargs_):
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, indRetif)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if indRetif.subclass:
            return indRetif.subclass(*args_, **kwargs_)
        else:
            return indRetif(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        if (

        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='indRetif', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('indRetif')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='indRetif')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='indRetif', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='indRetif'):
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='indRetif', fromsubclass_=False, pretty_print=True):
        pass
    def build(self, node):
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class indRetif


class nrRecibo(GeneratedsSuper):
    # NOTE(review): this class is truncated below — the source chunk ends
    # mid-way through export(); remainder lies outside this excerpt.
    subclass = None
    superclass = None
    def __init__(self):
        self.original_tagname_ = None
    def factory(*args_, **kwargs_):
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, nrRecibo)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if nrRecibo.subclass:
            return nrRecibo.subclass(*args_, **kwargs_)
        else:
            return nrRecibo(*args_, **kwargs_)
    factory = staticmethod(factory)
    def hasContent_(self):
        if (

        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='nrRecibo', namespacedef_='', pretty_print=True):
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('nrRecibo')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
# NOTE(review): the "<reponame>" marker below is dataset/extraction residue,
# kept verbatim; it is not valid Python.
<reponame>learning310/U-Time
"""
"""
import os
import numpy as np
from argparse import ArgumentParser, Namespace
from utime.bin.evaluate import (set_gpu_vis,
                                get_and_load_one_shot_model,
                                get_logger)
from utime.hypnogram.utils import dense_to_sparse
from utime.io.channels import infer_channel_types, VALID_CHANNEL_TYPES
from utime.io.channels import auto_infer_referencing as infer_channel_refs
from utime import Defaults
from pprint import pformat
from collections import namedtuple


def get_argparser():
    """
    Returns an argument parser for this script
    """
    parser = ArgumentParser(description='Predict using a U-Time model.')
    parser.add_argument("-f", type=str, required=True,
                        help='Path to file to predict on.')
    parser.add_argument("-o", type=str, required=True,
                        help="Output path for storing predictions. "
                             "Valid extensions are '.hyp' (text file with 1 stage (string) per line), "
                             "'.ids' (init-duration-stage (string) format text file) and '.npy' (numpy array "
                             "of shape [N, 1] storing stages (ints)). "
                             "If any other or no extension is specified, '.npy' is assumed.")
    parser.add_argument("--header_file_name", type=str, default=None,
                        help='Optional header file name. Header must be in the same folder of the input file, see -f.')
    parser.add_argument("--logging_out_path", type=str, default=None,
                        help='Optional path to store prediction log. If not set, <out_folder>/<file_name>.log is used.')
    parser.add_argument("--channels", nargs='+', type=str, default=None,
                        required=True,
                        help="A list of channels to use for prediction. "
                             "To predict on multiple channel groups, pass a string where "
                             "each channel in each channel group is separated by '++' and different groups are "
                             "separated by space or '&&'. E.g. to predict on {EEG1, EOG1} and {EEG2, EOG2}, pass "
                             "'EEG1++EOG1' 'EEG2++EOG2'. Each group will be used for prediction once, and the final "
                             "results will be a majority vote across all. "
                             "You may also specify a list of individual channels and use the --auto_channel_grouping to"
                             " predict on all channel group combinations possible by channel types. "
                             "You may optionally also specify channel types using general channel declarations "
                             "['EEG', 'EOG', 'EMG'] which will be considered when using the --auto_channel_grouping "
                             "flag. Use '<channel_name>==<channel_type>', e.g. 'C3-A2==EEG' 'EOGl==EOG'.")
    parser.add_argument("--auto_channel_grouping", nargs="+", type=str, default=None,
                        help="Attempt to automatically group all channels specified with --channels into channel "
                             "groups by types. Pass a string of format '<type_1> <type_2>' (optional && separaters) "
                             "using the general channel types declarations ['EEG', 'EOG', 'EMG']. "
                             "E.g. to predict on all available channel groups with 1 EEG and 1 EOG channel "
                             "(in that order), pass '--auto_channel_grouping=EEG EOG' and all channels to consider "
                             "with the --channels argument. Channel types may be passed with --channels (see above), "
                             "otherwise, channel types are automatically inferred from the channel names. "
                             "Note that not all models are designed to work with all types, e.g. U-Sleep V1.0 "
                             "does not need EMG inputs and should not be passed.")
    parser.add_argument("--auto_reference_types", nargs='+', type=str, default=None,
                        help="Attempt to automatically reference channels to MASTOID typed channels. Pass channel "
                             "types in ['EEG', 'EOG'] for which this feature should be active. E.g., with "
                             "--channels C3 C4 A1 A2 passed and --auto_reference_types EEG set, the referenced "
                             "channels C3-A2 and C4-A1 will be used instead.")
    parser.add_argument("--strip_func", type=str, default='trim_psg_trailing',
                        help="Strip function to use, default = 'trim_psg_trailing'.")
    parser.add_argument("--model", type=str, default=None,
                        help="Specify a model by string identifier of format <model_name>:<model_version> "
                             "available in the U-Sleep package. OBS: The U-Sleep package must be installed or an "
                             "error is raised. Cannot specify both --model and --project_dir")
    parser.add_argument("--project_dir", type=str, default=None,
                        help='Path to U-Time project folder. Cannot specify both --model and --project_dir. '
                             'If neither are specified, --project_dir defaults to the current working directory.')
    parser.add_argument("--data_per_prediction", type=int, default=None,
                        help='Number of samples that should make up each sleep'
                             ' stage scoring. Defaults to sample_rate*30, '
                             'giving 1 segmentation per 30 seconds of signal. '
                             'Set this to 1 to score every data point in the '
                             'signal.')
    parser.add_argument("--num_GPUs", type=int, default=1,
                        help="Number of GPUs to use for this job")
    parser.add_argument("--force_GPU", type=str, default="")
    parser.add_argument("--no_argmax", action="store_true",
                        help="Do not argmax prediction volume prior to save.")
    parser.add_argument("--weights_file_name", type=str, required=False,
                        help="Specify the exact name of the weights file "
                             "(located in <project_dir>/model/) to use.")
    return parser


def get_processed_args(args):
    """
    Validate and prepare args.
    Returns a new set of args with potential modifications.

    Returns:
        Path to a validated project directory as per --project_dir.
    """
    modified_args = {}
    for key, value in vars(args).items():
        if isinstance(value, list):
            # Allow list-like arguments to be passed either space-separated as normally,
            # or using '&&' delimiters. This is useful e.g. when using Docker.
            split_list = []
            for item in value:
                split_list.extend(map(lambda s: s.strip(), item.split("&&")))
            value = split_list
        modified_args[key] = value
    args = Namespace(**modified_args)
    assert args.num_GPUs >= 0, "--num_GPUs must be positive or 0."
if args.model: if args.project_dir is not None: raise ValueError("Specifying both the --model and --project_dir flags is " "ambiguous and is not allowed.") try: import usleep except ImportError as e: raise RuntimeError("Cannot use the --model flag when the U-Sleep package is " "not installed.") from e model_name, model_version = args.model.split(":") project_dir = usleep.get_model_path(model_name, model_version) else: project_dir = os.path.abspath(args.project_dir or "./") # Check project folder is valid from utime.utils.scriptutils.scriptutils import assert_project_folder assert_project_folder(project_dir, evaluation=True) args.project_dir = project_dir # Set absolute input file path args.f = os.path.abspath(args.f) # Check header exists if specified if args.header_file_name and not os.path.exists(os.path.join(os.path.split(args.f)[0], args.header_file_name)): raise ValueError(f"Could not find header file with name {args.header_file_path} in the " f"folder where input file {args.f} is stored.") # Set output file path if os.path.isdir(args.o): args.o = os.path.join(args.o, os.path.splitext(os.path.split(args.f)[-1])[0] + ".npy") # Set logging out path default_log_file_path = os.path.splitext(args.o)[0] + ".log" if args.logging_out_path is None: args.logging_out_path = default_log_file_path elif os.path.isdir(args.logging_out_path): args.logging_out_path = os.path.join(args.logging_out_path, os.path.split(default_log_file_path)[-1]) if args.auto_channel_grouping is not None: # Check if --auto_channel_grouping has correct format and at least 2 groups assert len(args.auto_channel_grouping) > 1, "Should specify at least 2 channel type groups " \ "with parameter --auto_channel_grouping, " \ f"e.g. 
# (continuation) tail of the assert message and return of get_processed_args(),
# whose beginning lies on the previous collapsed source line.
'EEG' 'EOG', but got {args.auto_channel_grouping}"
    return args


def predict_study(study, model, channel_groups, no_argmax, logger=print):
    # Predict a hypnogram for one study: sum the model's per-group predictions
    # over all channel groups, then optionally argmax to integer stage labels.
    psg = np.expand_dims(study.get_all_periods(), 0)
    pred = None
    for channel_group in channel_groups:
        logger("--- Channel names: {}\n"
               " Channel inds: {}".format(channel_group.channel_names,
                                          channel_group.channel_indices))
        # Get PSG for particular group
        psg_subset = psg[..., tuple(channel_group.channel_indices)]
        logger(" Extracted PSG shape: {}".format(psg_subset.shape))
        if pred is None:
            pred = model.predict_on_batch(psg_subset)
        else:
            # Sum into if using multiple channel groups
            pred += model.predict_on_batch(psg_subset)
    # NOTE(review): .numpy() implies `pred` is a framework tensor (e.g. TF) — confirm.
    pred = pred.numpy().reshape(-1, pred.shape[-1])
    if no_argmax:
        return pred
    else:
        return np.expand_dims(pred.argmax(-1), -1)


def save_hyp(path, pred, **kwargs):
    """
    Save predictions as stage strings with 1 stage (segment) per line in a plain
    text file.
    """
    # Map integer outputs to string stages
    stage_strings = np.vectorize(Defaults.get_class_int_to_stage_string().get)(pred.ravel())
    with open(path, "w") as out_f:
        out_f.write("\n".join(stage_strings))


def save_ids(path, pred, period_length_sec, **kwargs):
    """
    Save predictions as stage strings in init-duration-stage format in a plain text file. 
""" # Map integer outputs to string stages stage_strings = np.vectorize(Defaults.get_class_int_to_stage_string().get)(pred.ravel()) ids = dense_to_sparse(stage_strings, period_length_sec, allow_trim=True) with open(path, "w") as out_f: for i, d, s in ids: out_f.write(f"{i},{d},{s}\n") def save_npy(path, pred, **kwargs): """ Save predictions as a numpy file storing a [N, 1] array of integer stages """ np.save(path, pred.reshape(len(pred), -1)) def save_prediction(pred, out_path, period_length_sec, no_argmax, logger): dir_, fname = os.path.split(out_path) if dir_: os.makedirs(dir_, exist_ok=True) basename, ext = os.path.splitext(fname) if ext == ".hyp": # Save as plain text of 1 stage per line out_path = os.path.join(dir_, basename + ext) save_func = save_hyp assert not no_argmax, "Cannot save to .hyp format with the --no_argmax flag. Please use .npy." elif ext == ".ids": # Save as plain text in IDS format out_path = os.path.join(dir_, basename + ext) save_func = save_ids assert not no_argmax, "Cannot save to .ids format with the --no_argmax flag. Please use .npy." else: # Save as npy out_path = os.path.join(dir_, basename + ".npy") save_func = save_npy # Save pred to disk logger("* Saving prediction array of shape {} to {}".format( pred.shape, out_path )) logger(f"* Using save function: {save_func.__name__}") save_func(out_path, pred, period_length_sec=period_length_sec) def split_channel_types(channels): """ TODO Args: channels: list of channel names Returns: stripped channels channel_types """ stripped, types = [], [] for channel in channels: type_ = None if "==" in channel: channel, type_ = channel.split("==") type_ = type_.strip().upper() if type_ not in VALID_CHANNEL_TYPES: raise ValueError(f"Invalid channel type '{type_}' specified for channel '{channel}'. 
" f"Valid are: {VALID_CHANNEL_TYPES}") types.append(type_) stripped.append(channel) return stripped, types def unpack_channel_groups(channels): """ TODO """ channels_to_load, channel_groups = [], [] grouped = map(lambda chan: "++" in chan, channels) if all(grouped): for
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from lark import Lark, Transformer, Tree, Token
from collections import namedtuple
import typing
import json

# Lark grammar for (a subset of) the proto3 language, including service/rpc
# definitions and '//' comments carrying '@key=value' tags.
# NOTE(review): line breaks inside this grammar string were lost in the
# collapsed source and have been restored one rule per line.
BNF = r'''
OCTALDIGIT: "0..7"
IDENT: ( "_" )* LETTER ( LETTER | DECIMALDIGIT | "_" )*
FULLIDENT: IDENT ( "." IDENT )*
MESSAGENAME: IDENT
ENUMNAME: IDENT
FIELDNAME: IDENT
ONEOFNAME: IDENT
MAPNAME: IDENT
SERVICENAME: IDENT
TAGNAME: IDENT
TAGVALUE: IDENT
RPCNAME: IDENT
QUALIFIER: ( "stream" )
MESSAGETYPE: [ "." ] ( IDENT "." )* MESSAGENAME
ENUMTYPE: [ "." ] ( IDENT "." )* ENUMNAME
INTLIT : DECIMALLIT | OCTALLIT | HEXLIT
DECIMALLIT: ( "1".."9" ) ( DECIMALDIGIT )*
OCTALLIT : "0" ( OCTALDIGIT )*
HEXLIT : "0" ( "x" | "X" ) HEXDIGIT ( HEXDIGIT )*
FLOATLIT: ( DECIMALS "." [ DECIMALS ] [ EXPONENT ] | DECIMALS EXPONENT | "."DECIMALS [ EXPONENT ] ) | "inf" | "nan"
DECIMALS : DECIMALDIGIT ( DECIMALDIGIT )*
EXPONENT : ( "e" | "E" ) [ "+" | "-" ] DECIMALS
BOOLLIT: "true" | "false"
STRLIT: ( "'" ( CHARVALUE )* "'" ) | ( "\"" ( CHARVALUE )* "\"" )
CHARVALUE: HEXESCAPE | OCTESCAPE | CHARESCAPE | /[^\0\n\\]/
HEXESCAPE: "\\" ( "x" | "X" ) HEXDIGIT HEXDIGIT
OCTESCAPE: "\\" OCTALDIGIT OCTALDIGIT OCTALDIGIT
CHARESCAPE: "\\" ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | "\\" | "'" | "\"" )
QUOTE: "'" | "\""
EMPTYSTATEMENT: ";"
CONSTANT: FULLIDENT | ( [ "-" | "+" ] INTLIT ) | ( [ "-" | "+" ] FLOATLIT ) | STRLIT | BOOLLIT
syntax: "syntax" "=" QUOTE "proto3" QUOTE ";"
import: "import" [ "weak" | "public" ] STRLIT ";"
package: "package" FULLIDENT ";"
option: "option" OPTIONNAME "=" CONSTANT ";"
OPTIONNAME: ( IDENT | "(" FULLIDENT ")" ) ( "." IDENT )*
TYPE: "double" | "float" | "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string" | "bytes" | MESSAGETYPE | ENUMTYPE
FIELDNUMBER: INTLIT
field: [ comments ] TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
fieldoptions: fieldoption ( "," fieldoption )*
fieldoption: OPTIONNAME "=" CONSTANT
repeatedfield: [ comments ] "repeated" field
oneof: "oneof" ONEOFNAME "{" ( oneoffield | EMPTYSTATEMENT )* "}"
oneoffield: TYPE FIELDNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] ";"
mapfield: [ comments ] "map" "<" KEYTYPE "," TYPE ">" MAPNAME "=" FIELDNUMBER [ "[" fieldoptions "]" ] TAIL
KEYTYPE: "int32" | "int64" | "uint32" | "uint64" | "sint32" | "sint64" | "fixed32" | "fixed64" | "sfixed32" | "sfixed64" | "bool" | "string"
reserved: "reserved" ( ranges | fieldnames ) ";"
ranges: range ( "," range )*
range: INTLIT [ "to" ( INTLIT | "max" ) ]
fieldnames: FIELDNAME ( "," FIELDNAME )*
enum: [ comments ] "enum" ENUMNAME enumbody
enumbody: "{" ( enumfield | EMPTYSTATEMENT )* "}"
enumfield: [ COMMENTS ] IDENT "=" INTLIT [ "[" enumvalueoption ( "," enumvalueoption )* "]" ] TAIL
enumvalueoption: OPTIONNAME "=" CONSTANT
message: [ comments ] "message" MESSAGENAME messagebody
messagebody: "{" ( repeatedfield | field | enum | message | option | oneof | mapfield | reserved | EMPTYSTATEMENT )* "}"
googleoption: "option" "(google.api.http)" "=" "{" [ "post:" CONSTANT [ "body:" CONSTANT ] ] "}" ";"
service: [ comments ] "service" SERVICENAME "{" ( option | rpc | EMPTYSTATEMENT )* "}"
rpc: [ comments ] "rpc" RPCNAME "(" ( QUALIFIER )* MESSAGETYPE ")" "returns" "(" ( QUALIFIER )* MESSAGETYPE ")" ( ( "{" ( googleoption | option | EMPTYSTATEMENT )* "}" ) | ";" )
proto:[ comments ] syntax ( import | package | option | topleveldef | EMPTYSTATEMENT )*
topleveldef: message | enum | service | comments
TAIL: ";" [/[\s|\t]/] [ COMMENT ]
COMMENT: "//" /.*/ [ "\n" ]
comments: COMMENT ( COMMENT )*
COMMENTS: COMMENT ( COMMENT )*
%import common.HEXDIGIT
%import common.DIGIT -> DECIMALDIGIT
%import common.LETTER
%import common.WS
%import common.NEWLINE
%ignore WS
'''

# Plain-data result records produced by ProtoTransformer below.
Comment = typing.NamedTuple('Comment', [('content', str), ('tags', typing.Dict[str, typing.Any])])
Field = typing.NamedTuple('Field', [('comment', 'Comment'), ('type', str), ('key_type', str), ('val_type', str), ('name', str), ('number', int)])
Enum = typing.NamedTuple('Enum', [('comment', 'Comment'), ('name', str), ('fields', typing.Dict[str, 'Field'])])
Message = typing.NamedTuple('Message', [('comment', 'Comment'), ('name', str), ('fields', typing.List['Field']), ('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum'])])
Service = typing.NamedTuple('Service', [('name', str), ('functions', typing.Dict[str, 'RpcFunc'])])
RpcFunc = typing.NamedTuple('RpcFunc', [('name', str), ('in_stream', bool), ('in_type', str), ('out_stream', bool), ('out_type', str), ('uri', str)])
ProtoFile = typing.NamedTuple('ProtoFile', [('messages', typing.Dict[str, 'Message']), ('enums', typing.Dict[str, 'Enum']), ('services', typing.Dict[str, 'Service']), ('imports', typing.List[str]), ('options', typing.Dict[str, str]), ('package', str)])


class ProtoTransformer(Transformer):
    '''Converts syntax tree token into more easily usable namedtuple objects'''
    def message(self, tokens):
        '''Returns a Message namedtuple'''
        comment = Comment("", {})
        if len(tokens) < 3:
            # No leading comment group present.
            name_token, body = tokens
        else:
            comment, name_token, body = tokens
        return Message(comment, name_token.value, *body)
    def messagebody(self, items):
        '''Returns a tuple of message body namedtuples'''
        messages = {}
        enums = {}
        fields = []
        for item in items:
            if isinstance(item, Message):
                messages[item.name] = item
            elif isinstance(item, Enum):
                enums[item.name] = item
            elif isinstance(item, Field):
                fields.append(item)
        return fields, messages, enums
    def field(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        type = Token("TYPE", "")
        fieldname = Token("FIELDNAME", "")
        fieldnumber = Token("FIELDNUMBER", "")
        for token in tokens:
            if isinstance(token, Comment):
                comment = token
            elif isinstance(token, Token):
                if token.type == "TYPE":
                    type = token
                elif token.type == "FIELDNAME":
                    fieldname = token
                elif token.type == "FIELDNUMBER":
                    fieldnumber = token
                elif token.type == "COMMENT":
                    comment = Comment(token.value, {})
        # Scalar field: type, key_type and val_type are all the same type name.
        return Field(comment, type.value, type.value, type.value, fieldname.value, int(fieldnumber.value))
    def repeatedfield(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        if len(tokens) < 2:
            field = tokens[0]
        else:
            comment, field = tuple(tokens)
        return Field(comment, 'repeated', field.type, field.type, field.name, field.number)
    def mapfield(self, tokens):
        '''Returns a Field namedtuple'''
        comment = Comment("", {})
        val_type = Token("TYPE", "")
        key_type = Token("KEYTYPE", "")
        fieldname = Token("MAPNAME", "")
        fieldnumber = Token("FIELDNUMBER", "")
        for token in tokens:
            if isinstance(token, Comment):
                comment = token
            elif isinstance(token, Token):
                if token.type == "TYPE":
                    val_type = token
                elif token.type == "KEYTYPE":
                    key_type = token
                elif token.type == "MAPNAME":
                    fieldname = token
                elif token.type == "FIELDNUMBER":
                    fieldnumber = token
                elif token.type == "COMMENT":
                    comment = Comment(token.value, {})
        return Field(comment, 'map', key_type.value, val_type.value, fieldname.value, int(fieldnumber.value))
    def comments(self, tokens):
        '''Returns a Tag namedtuple'''
        # Concatenates '//' comment lines and extracts '@key=value' tags.
        comment = ''
        tags = {}
        for token in tokens:
            comment += token
            if token.find('@') < 0:
                continue
            kvs = token.strip(" /\n").split('@')
            for kv in kvs:
                kv = kv.strip(" /\n")
                if not kv:
                    continue
                tmp = kv.split('=')
                key = tmp[0].strip(" /\n").lower()
                if key.find(" ") >= 0:
                    # Not a tag; '@' occurred inside free text.
                    continue
                if len(tmp) > 1:
                    tags[key] = tmp[1].lower()
                else:
                    tags[key] = True
        return Comment(comment, tags)
    def enum(self, tokens):
        '''Returns an Enum namedtuple'''
        comment = Comment("", {})
        if len(tokens) < 3:
            name, fields = tokens
        else:
            comment, name, fields = tokens
        return Enum(comment, name.value, fields)
    def enumbody(self, tokens):
        '''Returns a sequence of enum identifiers'''
        enumitems = []
        for tree in tokens:
            if tree.data != 'enumfield':
                continue
            comment = Comment("", {})
            name = Token("IDENT", "")
            value = Token("INTLIT", "")
            for token in tree.children:
                if isinstance(token, Comment):
                    comment = token
                elif isinstance(token, Token):
                    if token.type == "IDENT":
                        name = token
                    elif token.type == "INTLIT":
                        value = token
                    elif token.type == "COMMENTS":
                        comment = Comment(token.value, {})
            # NOTE(review): value.value is kept as a string here, unlike
            # field()/mapfield() which cast the number to int — verify intended.
            enumitems.append(Field(comment, 'enum', 'enum', 'enum', name.value, value.value))
        return enumitems
    def service(self, tokens):
        '''Returns a Service namedtuple'''
        # NOTE(review): `functions` is built as a list although the Service
        # NamedTuple declares a Dict — confirm downstream expectations.
        functions = []
        name = ''
        for i in range(0, len(tokens)):
            if not isinstance(tokens[i], Comment):
                if isinstance(tokens[i], RpcFunc):
                    functions.append(tokens[i])
                else:
                    name = tokens[i].value
        return Service(name, functions)
    def rpc(self, tokens):
        '''Returns a RpcFunc namedtuple'''
        # NOTE(review): truncated below — the source chunk ends mid-method.
        uri = ''
        in_type = ''
        in_stream = False
        out_stream = False
        for token in tokens:
            if isinstance(token, Token):
                if token.type == "RPCNAME":
                    name = token
                elif token.type == "MESSAGETYPE":
                    # First MESSAGETYPE is the request, second the response.
                    if in_type:
                        out_type = token
                    else:
                        in_type = token
                elif token.type == "QUALIFIER":
                    if in_type: