input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>python/nvh.py
#!/usr/bin/python3
# Copyright (c) 2019-2022 Lexical Computing: <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys, re, os, fileinput
from urllib.parse import quote_plus
import hashlib
def get_filename(value, extension='.nvh'):
    """Return a filesystem-safe file name derived from *value*.

    The value is URL-quoted (spaces become '+', unsafe characters are
    percent-encoded).  If the quoted form exceeds 100 characters, the
    MD5 hex digest of the raw value is used instead to keep file names
    short and valid.
    """
    quoted = quote_plus(value)
    if len(quoted) <= 100:
        return quoted + extension
    return hashlib.md5(value.encode('utf8')).hexdigest() + extension
class nvh:
    """A node in an NVH ("name: value" hierarchy) tree.

    Every node has a name, a string value, the indentation string it was
    read with, a link to its parent node and a list of child nodes.  The
    document root is conventionally a node with an empty name whose
    children are the top-level entries.

    Transient attributes (``selected``, ``project``, ``do_projection``)
    are attached to nodes during filtering/projection and removed again
    by :meth:`refresh`.
    """

    def __init__(self, parent, indent="", name="", value="", children=None):
        # parent may be None for the root node; children defaults to a
        # fresh list so instances never share one mutable default.
        self.parent = parent
        self.name = name
        self.value = value
        self.indent = indent
        self.children = children or []

    def __repr__(self):
        return "%s: %s" % (self.name, self.value)

    def dump(self, out, do_projection=False):
        """Write this node and its (possibly projected) subtree to *out*.

        If a previous filter_entries() call flagged this node with
        ``do_projection``, the flag is consumed and the dump restarts
        with projection enabled, so only children carrying a ``project``
        mark are emitted.
        """
        if hasattr(self, "do_projection"):
            # Consume the one-shot flag set by filter_entries() and
            # re-dump with projection turned on.
            delattr(self, "do_projection")
            return self.dump(out, True)
        if self.name:
            if self.parent and self.parent.parent and self.parent.parent.name:
                # force consistent indent
                ind_step = self.parent.indent[len(self.parent.parent.indent):]
                self.indent = self.parent.indent + ind_step
            elif self.parent and self.parent.name:
                # First level below a named parent: reuse the indent of
                # the parent's first child.
                self.indent = self.parent.children[0].indent
            out.write(self.indent + self.name + ": " + self.value + "\n")
        for c in self.children:
            project = getattr(c, "project", 0)
            if not do_projection or project:
                # project == 1 keeps projecting further down the tree;
                # project == 2 dumps the child's subtree in full.
                c.dump(out, project == 1)

    def filter_entries(self, selectors, projectors, maxitems=0):
        """Return a shallow copy of this node with filtered children.

        :param selectors: query strings like ``hw.sense.example#=0.quality=bad``;
            only children matching ALL of them are kept.
        :param projectors: dotted paths naming the sub-nodes to keep on
            output (a trailing ``!`` makes the path mandatory).
        :param maxitems: truncate the result to at most this many
            children (0 = no limit).
        :raises Exception: if a selector does not match the query grammar.
        """
        def prepare_selector(q):
            # Split a query into (path, operator, right-hand side).
            m = re.match(r"((?:[a-zA-Z._](?:#[<>=]+\d+)?)+) *(?:(=|!=|~=)(.*))?$", q)
            # e.g. 'hw.sense.example#=0.quality=bad'
            if not m:
                raise Exception("Invalid query: '%s'" % q)
            return m.groups()
        self.refresh()
        children = [c for c in self.children
                    if all(c.selection(*prepare_selector(f)) for f in selectors)]
        if maxitems:
            children = children[:maxitems]
        d = nvh(self.parent, self.indent, self.name, self.value, children)
        if projectors:
            for p in projectors:
                d.prepare_projection(p)
            # Tell dump() to honour the `project` marks set above.
            d.do_projection = True
        return d

    def refresh(self):
        """Recursively drop transient selection/projection marks."""
        if hasattr(self, 'selected'):
            delattr(self, 'selected')
        if hasattr(self, 'project'):
            delattr(self, 'project')
        if hasattr(self, "do_projection"):
            delattr(self, "do_projection")
        for c in self.children:
            c.refresh()

    def prepare_projection(self, q):
        """Mark children matching the dotted path *q* for projection.

        Final path components get ``project = 2`` (dump whole subtree);
        intermediate nodes on a matched path get ``project = 1`` (keep
        projecting).  Returns True if anything under this node matched.
        """
        out = False
        for c in self.children:
            if "." in q:
                name, qq = q.split(".", 1)
                if c.name == name and getattr(c, "selected", 1):
                    if c.prepare_projection(qq):
                        # Intermediate node on a matched path.
                        c.project = getattr(c, "project", 1)
                        out = True
            elif (c.name == q or c.name + '!' == q) \
                    and getattr(c, "selected", 1):
                # Final path component: dump this child in full.
                c.project = 2
                out = True
        if q.endswith('!') and not out:
            # A '!' projector is mandatory: with no match, drop this
            # whole node from the projection.
            self.project = 0
        return out

    def selection(self, lhs, operator, rhs, mark_selected=1):
        """Test whether this node matches the selector path *lhs*.

        *lhs* is a dotted path; a path segment may carry a count
        condition such as ``example#=0`` (exactly 0 matching children)
        or ``example#<3``.  *operator* is one of ``=``, ``!=``, ``~=``
        (regex) or None (existence only) and is applied to the value of
        the final path component.  Matching nodes are marked with a
        ``selected`` attribute (used later by projection) unless
        *mark_selected* is falsy.
        """
        is_selected = getattr(self, "selected", 1)
        if "." in lhs:  # dive in
            name, new_lhs = lhs.split(".", 1)
            if "." in new_lhs:
                nextname, rest = new_lhs.split(".", 1)
                # Strip any count condition from the next segment; the
                # condition itself is handled below via `nextname`.
                new_lhs = nextname.split("#")[0] + "." + rest
            else:
                nextname, rest = new_lhs, ''
                new_lhs = nextname.split("#")[0]
            if self.name != name:
                return False
            elif "#" in nextname:  # count
                nextname, condition = nextname.split('#', 1)
                if condition.startswith('='):
                    # '=N' in the query means equality, i.e. '==N'.
                    condition = condition.replace("=", "==")
                # Count queries must not mark children as selected.
                new_mark_selected = 0
            else:
                new_mark_selected = 1
                # No explicit count: require at least one matching child.
                condition = ">0"
            numof_children = len([c for c in self.children
                                  if c.selection(new_lhs, operator, rhs, new_mark_selected)])
            # `condition` comes from the regex-validated query grammar
            # (e.g. '==0', '<3', '>0') and is applied to the child count.
            if eval("%d%s" % (numof_children, condition)):
                if mark_selected: self.selected = is_selected
                return True
            else:
                if mark_selected: self.selected = 0
                return False
        elif self.name != lhs:  # nothing, names don't match
            return False
        # now, it's guaranteed that self.name == lhs
        if operator == "!=":
            if self.value != rhs:
                if mark_selected: self.selected = is_selected
                return True
            if mark_selected: self.selected = 0
            return False
        if operator == "~=":
            if re.match(rhs, self.value):
                if mark_selected: self.selected = is_selected
                return True
            if mark_selected: self.selected = 0
            return False
        if operator == "=":
            if self.value == rhs:
                if mark_selected: self.selected = is_selected
                return True
            if mark_selected: self.selected = 0
            return False
        # No operator: mere existence of the named node is a match.
        if mark_selected: self.selected = is_selected
        return True

    def merge(self, patch, replacers, print_counts=True):
        """Merge *patch*'s children into this node's children in place.

        *replacers* is a list of dotted names controlling the merge:
        a plain name replaces all same-named children with the patch's
        nodes of that name; ``name!`` deletes children whose name AND
        value match a patch node; a replacer with no '.' that never
        appears in the patch deletes all children of that name.
        Children not covered by a replacer are merged recursively when
        a child with equal name and value exists, otherwise inserted
        after the last same-named sibling.
        """
        def new_replacers(replacers, childname):
            # Keep the tail of dotted replacers rooted at `childname`.
            parsed_reps = [r.split(".", 1) for r in replacers if "." in r]
            return [r[1] for r in parsed_reps if childname == r[0]]
        from collections import defaultdict
        # Index existing children by value for fast merge-target lookup.
        self_hash = defaultdict(list)
        for c in self.children:
            self_hash[c.value].append(c)
        added = 0
        updated = 0
        processed_names = set()
        for pc in patch.children:
            if pc.name + '!' in replacers:
                # Deletion marker: drop exact name+value matches.
                self.children = [x for x in self.children
                                 if x.name != pc.name or x.value != pc.value]
                continue
            if pc.name in replacers:  # replacing existing records
                if pc.name in processed_names:
                    continue
                ii = len(self.children)  # index where to put pc.name
                for i, c in enumerate(self.children):
                    if c.name == pc.name:
                        ii = i
                        break
                self.children = [x for x in self.children
                                 if x.name != pc.name]
                for pc2 in patch.children:  # add all nodes with same name
                    if pc2.name == pc.name:
                        self.children.insert(ii, pc2)
                        ii += 1
                        pc2.parent = self
                processed_names.add(pc.name)
                updated += 1
                continue
            merged = False
            for c in self_hash[pc.value]:
                if c.value == pc.value and c.name == pc.name:
                    c.merge(pc, new_replacers(replacers, c.name), False)
                    merged = True
                    updated += 1
            if not merged:  # find right place and insert
                ii = len(self.children)
                for i, c in enumerate(self.children):
                    if c.name == pc.name:
                        ii = i
                # Insert after the last same-named sibling (appends when
                # there is none, since ii+1 is then past the end).
                self.children.insert(ii + 1, pc)
                pc.parent = self
                self_hash[pc.value].append(pc)
                added += 1
        for rep in replacers:
            if '.' not in rep and rep not in processed_names:  # deleting
                self.children = [x for x in self.children if x.name != rep]
                updated += 1
        if print_counts:
            print("Added %d entries, updated %d entries" % (added, updated),
                  file=sys.stderr)

    def split(self, outdir):
        """Write each child's subtree to its own file under *outdir*.

        File names are derived from the child's value via get_filename().
        """
        for c in self.children:
            # BUG FIX: the file handle was previously never closed;
            # use a context manager so data is flushed deterministically.
            with open("%s/%s" % (outdir, get_filename(c.value)), "w") as outfile:
                c.dump(outfile)

    def generate_schema(self, schema, firstParent=True):
        """Infer a schema dict from the data, merging into *schema*.

        Each entry maps a child name to
        ``{"optional": bool, "repeated": bool, "schema": {...}}``.
        *firstParent* is True while visiting the first parent in which a
        name can occur; names missing from later parents become optional.
        """
        seen = {n: False for n in schema}
        firstInThisParent = {c.name: True for c in self.children}
        for c in self.children:
            is_this_new = firstInThisParent[c.name] and firstParent
            if c.name not in schema:  # first occurrence across all parents
                schema[c.name] = {"optional": not firstParent, "repeated": False, "schema": {}}
                is_this_new = True
            elif not firstInThisParent[c.name]:
                schema[c.name]["repeated"] = True
            seen[c.name] = True
            c.generate_schema(schema[c.name]["schema"], is_this_new)
            firstInThisParent[c.name] = False
        for n in seen:
            if not seen[n]:
                # Known name absent from this parent => optional.
                schema[n]["optional"] = True

    def check_schema(self, schema, parent="ROOT", ancestor=None, outfile=sys.stdout):
        """Validate this subtree against *schema*, reporting to *outfile*.

        Reports duplicate name+value pairs, names not allowed by the
        schema, forbidden repetitions and missing mandatory children.
        *ancestor* labels the top-level entry in error messages.
        """
        def report(s):
            outfile.write("ERROR: %s (%s)\n" % (s, ancestor))
        from collections import Counter
        keyval_freqs = Counter((c.name, c.value) for c in self.children)
        duplicates = [d for d in keyval_freqs.items() if d[1] > 1]
        for d in duplicates:
            report("Duplicate key-value pair '%s: %s' for parent %s (occurs %d times)" % (d[0][0], d[0][1], parent, d[1]))
        freqs = Counter(c.name for c in self.children)
        for n in freqs:
            if n not in schema:
                report("%s not allowed as a child of %s" % (n, parent))
                continue
            if freqs[n] > 1 and not schema[n]["repeated"]:
                report("%s not allowed to be repeated" % n)
        for n in schema:
            if not schema[n]["optional"] and n not in freqs:
                report("%s is mandatory and missing as child of %s" % (n, parent))
        for c in self.children:
            if c.name in schema:
                # The first named ancestor is kept as the error context.
                c.check_schema(schema[c.name]["schema"], c.name,
                               ancestor or "%s: %s" % (c.name, c.value),
                               outfile=outfile)

    def nvh2schema(self):
        """Convert a schema written in NVH notation to a schema dict.

        Child values encode cardinality: '' mandatory single,
        '?' optional single, '+' mandatory repeated, '*' optional
        repeated.  Raises on duplicates or unknown markers.
        """
        schema = {}
        for c in self.children:
            if c.name in schema:
                raise Exception("Invalid schema: %s specified multiple times" % c.name)
            node_def = {}
            if c.value == "":
                node_def["optional"] = False
                node_def["repeated"] = False
            elif c.value == "?":
                node_def["optional"] = True
                node_def["repeated"] = False
            elif c.value == "+":
                node_def["optional"] = False
                node_def["repeated"] = True
            elif c.value == "*":
                node_def["optional"] = True
                node_def["repeated"] = True
            else:
                raise Exception("Invalid schema: %s has invalid value '%s'" % (c.name, c.value))
            node_def["schema"] = c.nvh2schema()
            schema[c.name] = node_def
        return schema

    @staticmethod
    def print_schema(s, indent=0, outfile=sys.stdout):
        """Print one level of schema dict *s* in NVH schema notation.

        NOTE(review): this does not recurse into ``s[k]["schema"]``;
        confirm whether callers recurse themselves or recursion was
        intended here.
        """
        def get_symbol(d):
            # Inverse of nvh2schema's cardinality markers.
            if d["optional"]:
                if d["repeated"]:
                    return "*"
                return "?"
            elif d["repeated"]:
                return "+"
            return ""
        for k in s:
            print("%s%s: %s" % (" " * indent, k, get_symbol(s[k])), file=outfile)
| |
<reponame>aParthemer/MidiCompose
from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field, Field
from itertools import cycle
from typing import Set, List, Union, Sequence, Tuple, Any, Optional
from enum import Enum
from icecream import ic
from MidiCompose.logic.harmony.interval import Interval
from MidiCompose.logic.harmony.note import Note, HasNotes, to_note, sequence_to_notes
class KeySchema(Enum):
    """Interval patterns (in semitones between successive degrees) for
    common keys and modes.

    ``MAJOR`` and ``MINOR`` are aliases for ``IONIAN`` and ``AEOLIAN``
    respectively, so lookups by either name return the same member.
    """
    IONIAN = MAJOR = (2, 2, 1, 2, 2, 2)
    DORIAN = (2, 1, 2, 2, 2, 1)
    PHRYGIAN = (1, 2, 2, 2, 1, 2)
    LYDIAN = (2, 2, 2, 1, 2, 2)
    MIXOLYDIAN = (2, 2, 1, 2, 2, 1)
    AEOLIAN = MINOR = (2, 1, 2, 2, 1, 2)
    LOCRIAN = (1, 2, 2, 1, 2, 2)
    MELODIC_MINOR = (2, 1, 2, 2, 2, 2)
    LYDIAN_DOMINANT = (2, 2, 2, 1, 2, 1)
    ALTERED = (1, 2, 1, 2, 2, 2)
    HARMONIC_MINOR = (2, 1, 2, 2, 1, 3)
    HARMONIC_MAJOR = (2, 2, 1, 2, 1, 3)
    WHOLE_TONE = (2, 2, 2, 2, 2)
    DIMINISHED_HW = (1, 2, 1, 2, 1, 2, 1)
    DIMINISHED_WH = (2, 1, 2, 1, 2, 1, 2)
    CHROMATIC = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)

    @classmethod
    def parse(cls, value: Any) -> KeySchema:
        """Coerce *value* to a KeySchema member.

        Accepts a KeySchema (returned unchanged) or a case-insensitive
        member name string.

        :raises ValueError: for unknown names or unsupported types.
        """
        if isinstance(value, KeySchema):
            return value
        if isinstance(value, str):
            try:
                return KeySchema[value.upper()]
            except KeyError:
                # Catch only the lookup failure; re-raise with a helpful
                # message and without the noisy KeyError context.
                e = f"Invalid `KeySchema` string representation. Possible inputs are: {[v.name for v in cls]}"
                raise ValueError(e) from None
        # BUG FIX: previously any other type fell through and the method
        # silently returned None; fail loudly instead.
        raise ValueError(f"Failed to parse input `{value!r}` as a KeySchema.")

    @staticmethod
    def all_diatonic(exclude: KeySchema | Sequence[KeySchema | Any] = None) -> List[KeySchema]:
        """Return the seven diatonic modes, minus any listed in *exclude*.

        *exclude* may be a single KeySchema or a sequence of anything
        :meth:`parse` accepts.
        """
        if exclude is None:
            excluded = []
        elif isinstance(exclude, KeySchema):
            excluded = [exclude]
        else:
            excluded = [KeySchema.parse(ks) for ks in exclude]
        diatonic = [KeySchema.MAJOR, KeySchema.DORIAN, KeySchema.PHRYGIAN,
                    KeySchema.LYDIAN, KeySchema.MIXOLYDIAN, KeySchema.AEOLIAN,
                    KeySchema.LOCRIAN]
        return [ks for ks in diatonic if ks not in excluded]
class Key:
def __init__(self,
tonic: Note | Any,
key_schema: KeySchema | str = None):
if key_schema is None:
_parsed = self.parse(value=tonic)
tonic, key_schema = _parsed["tonic"], _parsed["key_schema"]
self.tonic = tonic # calls setter
self.key_schema = key_schema # setter
@classmethod
def parse(cls, value: Any) -> dict:
"""
Parse various input types as Key.
returns kwargs to pass to instance attributes in __init__ method.
Possible inputs include:
- Key object -> returns unchanged
- Note object -> returns Major scale with give Note as tonic if no key schema given.
- string representation with the form: <NOTE_STR> [<KEY_SCHEMA_STR>='MAJOR'] (e.g. "C HARMONIC_MINOR")
"""
DEFAULT_SCHEMA = KeySchema["MAJOR"]
if isinstance(value, Key):
_parsed = {"tonic": value.tonic, "key_schema": value.key_schema}
elif isinstance(value, Note):
_parsed = {"tonic": value, "key_schema": DEFAULT_SCHEMA}
elif isinstance(value, str):
_split_value = value.split(sep=" ")
if len(_split_value) == 2: # interpret as <tonic> <KeySchema>
try:
_tonic = Note(_split_value[0].title())
_key_schema = KeySchema.parse(_split_value[1])
_parsed = {"tonic": _tonic, "key_schema": _key_schema}
except:
raise
elif len(_split_value) == 1: # interpret as <tonic> with default KeySchema
try:
_tonic = Note(_split_value[0].title())
_parsed = {"tonic": _tonic, "key_schema": DEFAULT_SCHEMA}
except:
raise
else:
e = "Invalid string representation of `Key` object. Should have the form: '<tonic> [<key_schema>=MAJOR]'."
raise ValueError(e)
elif isinstance(value, int):
try:
_tonic = Note(value)
_parsed = {"tonic": _tonic, "key_schema": DEFAULT_SCHEMA}
except:
raise
else:
e = f"Failed to parse input `{value}` as a Key object."
raise ValueError(e)
return _parsed
@property
def tonic(self) -> Note:
return self._tonic
@tonic.setter
def tonic(self, value):
if not isinstance(value, Note):
try:
_tonic = Note(value)
except:
raise
else:
_tonic = value
self._tonic = _tonic
@property
def key_schema(self) -> KeySchema:
return self._key_schema
@key_schema.setter
def key_schema(self, value):
if isinstance(value, KeySchema):
_key_schema = value
elif isinstance(value, str):
try:
_key_schema = KeySchema[value]
except:
e = f"The argument given for `key_schema` : `{value}` is not a valid KeySchema value. Supported arguments include: {[v.name for v in KeySchema]}."
raise ValueError(e)
else:
e = f"Parameter `key_schema` must be given a valid KeySchema object or string representation."
raise TypeError(e)
self._key_schema = _key_schema
@property
def key_name(self) -> str:
return self.key_schema.name
@property
def schema(self) -> Tuple[int]:
return self.key_schema.value
@property
def notes(self) -> List[Note]:
# get major scale based on tonic
_family = Note(self.tonic.value % 12)
_scale = [_family]
for interv in self.schema:
next_note = Note((_scale[-1].value + interv) % 12)
_scale.append(next_note)
return _scale
def get_neighbors(self,
depth=1) -> List[Tuple[Key, Key]]:
"""
Returns list of tuples where first element of each tuple is the 'lower neighbor' (P5 below self) \
and the second element is the `upper neighbor` (P5 above self).
:param depth: number of neighbor sets to generate.
"""
if depth not in range(1,7):
e = "`depth` must be between 1 and 6"
raise ValueError(e)
_tonic = self.tonic
_neighbors = []
for _ in range(depth):
if len(_neighbors) == 0:
lower_tonic = Interval("P4").above(_tonic)
upper_tonic = Interval("P5").above(_tonic)
lower_neighbor = Key(tonic=lower_tonic, key_schema=self.key_schema)
upper_neighbor = Key(tonic=upper_tonic, key_schema=self.key_schema)
_neighbors.append((lower_neighbor, upper_neighbor))
else:
lower_tonic = Interval("P4").above(_neighbors[-1][0].tonic)
upper_tonic = Interval("P5").above(_neighbors[-1][1].tonic)
lower_neighbor = Key(tonic=lower_tonic, key_schema=self.key_schema)
upper_neighbor = Key(tonic=upper_tonic, key_schema=self.key_schema)
_neighbors.append((lower_neighbor, upper_neighbor))
return _neighbors
def get_index_of(self,
note: Union[Note, int, str]) -> int:
try:
_note = to_note(note)
except:
raise
if _note not in self:
e = f"Given from_note `{note}` is not in Key `{self.tonic}`"
raise ValueError(e)
_note = Note(_note.value % 12)
return self.notes.index(_note)
def next_note(self,
from_note: Union[Note, int, str],
step: int = 1) -> Note:
if step <= 0:
e = "Argument `step` must be >= 1."
raise ValueError(e)
else:
try:
_note = to_note(from_note)
except:
raise
try:
_idx = self.get_index_of(from_note)
except:
raise
for _step in range(step):
_idx = (_idx + 1) % len(self.notes)
_next_note = self[_idx]
return _next_note
def previous_note(self,
note: Union[Note, int, str],
step: int = 1) -> Note:
if step <= 0:
e = "Argument `step` must be >= 1."
raise ValueError(e)
else:
try:
_note = to_note(note)
except:
raise
try:
_idx = self.get_index_of(note)
except:
raise
for _step in range(step):
_idx = (_idx - 1) % len(self.notes)
_previous_note = self[_idx]
return _previous_note
def note_range(self,
a: Note | Any,
b: Note| Any) -> List[Note]:
"""
order is preserved
"""
try:
a, b = [to_note(n) for n in [a, b]]
except:
raise
if a == b:
return [a]
else:
chrom_range = Note.range(a, b)
rng = [n for n in chrom_range if n in self]
return rng
def steps_above(self,
note: Union[Note, int, str],
steps: int) -> Note:
"""
Get the Note which is "N" steps away from the given from_note in the context of the current Key.
"""
try:
_note = to_note(note)
except:
raise
try:
_idx = self.get_index_of(note)
except:
raise
if steps < 0:
e = "Argument `steps` must be positive."
raise ValueError(e)
_previous = _note
for _ in range(steps):
_n = self.next_note(_previous)
_previous = _previous.nearest_neighbors(_n, "UPPER")
_final = _previous
return _final
def steps_below(self,
note: Union[Note, int, str],
steps: int) -> Note:
try:
_note = to_note(note)
except:
raise
try:
_idx = self.get_index_of(note)
except:
raise
if steps < 0:
e = "Argument `steps` must be positive."
raise ValueError(e)
_previous = _note
for _ in range(steps):
_n = self.previous_note(_previous)
_previous = _previous.nearest_neighbors(_n, "LOWER")
_final = _previous
return _final
def steps_between(self,
a: Note,
b: Note) -> int:
"""
Returns the number of scale-steps between two notes in a key. Direction is assumed as from `a` to `b`.
"""
try:
a, b = to_note(a), to_note(b)
_notes = [a, b]
except:
raise
if any([n not in self for n in _notes]):
bad = [n for n in _notes if n not in self]
e = f"The given from_note(s) `{bad}` do not exist in the Key `{self.tonic}`."
raise ValueError(e)
else:
steps = 0
_dir = 1
if a == b:
pass
elif a < b:
while a < b:
a = self.steps_above(a, 1)
steps += 1
elif a > b:
_dir = -1
while a > b:
a = self.steps_below(a, 1)
steps += 1
steps = steps * _dir
return steps
@staticmethod
def all_keys_with(notes: Union[Note, int, str, Sequence],
key_schemas: KeySchema | Sequence[KeySchema] = KeySchema.MAJOR,
_any: bool = False,
_raise: bool = False) -> List[Key]:
"""
Returns a list of Key instances which contain `notes`.
:param notes: One or many Note/from_note-like objects
:param _any: if True, will return all keys containing any of the given notes. Default behavior is to include only
keys which contain all `notes`
"""
if isinstance(key_schemas, KeySchema):
_key_schemas = [key_schemas]
else:
try:
_key_schemas = [KeySchema.parse(ks) for ks | |
import enum
import logging
import pathlib
import random
from abc import abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import cloudpickle
import numpy as np
import torch
import torch.nn as nn
import determined as det
from determined import horovod, ipc, util, workload
from determined.horovod import hvd
from determined.pytorch import (
DataLoader,
LRScheduler,
PyTorchTrialContext,
Reducer,
TorchData,
_callback,
_Data,
_reduce_metrics,
data_length,
to_device,
)
from determined_common import check
# Apex is included only for GPU trials.
try:
    import apex
except ImportError:
    # Apex is optional: only warn when CUDA is actually available,
    # since that is the only case where AMP support would be used.
    if torch.cuda.is_available():
        logging.warning("Failed to import apex.")
    pass
class _WarningLogs(enum.Enum):
FAILED_MOVING_TO_DEVICE = 1
class PyTorchTrialController(det.LoopTrialController):
def __init__(self, trial_inst: det.Trial, *args: Any, **kwargs: Any) -> None:
    """Build the controller around a user PyTorchTrial: model, optimizer,
    data loaders, LR scheduler, callbacks, checkpoint restore and AMP."""
    super().__init__(*args, **kwargs)
    check.is_instance(trial_inst, PyTorchTrial, "PyTorchTrialController needs an PyTorchTrial")
    self.trial = cast(PyTorchTrial, trial_inst)
    self._check_evaluate_implementation()
    self._init_model_and_optimizer()
    # Validation loader will be undefined on process ranks > 0
    # when the user defines `validate_full_dataset()`.
    self.validation_loader = None  # type: Optional[torch.utils.data.DataLoader]
    self._set_data_loaders()
    # Track whether a warning logging category has already been issued to the user.
    self.warning_logged = {_WarningLogs.FAILED_MOVING_TO_DEVICE: False}
    self.context.lr_scheduler = self.trial.create_lr_scheduler(self.context.optimizer)
    self.callbacks = self.trial.build_callbacks()
    # If a load path is provided load weights and restore the data location.
    self._load()
    self._configure_amp()
    if self.hvd_config.use:
        # Ensure all horovod workers start from identical weights and
        # optimizer state (rank 0 is the source of truth).
        hvd.broadcast_parameters(self.context.model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(self.context.optimizer, root_rank=0)
    self.training_iterator = iter(self.training_loader)
@staticmethod
def pre_execute_hook(env: det.EnvContext, hvd_config: horovod.HorovodContext) -> None:
    """Per-process setup run before the controller is constructed:
    horovod initialization (when enabled) and random seeding."""
    # Initialize the correct horovod.
    if hvd_config.use:
        hvd.require_horovod_type("torch", "PyTorchTrial is in use.")
        hvd.init()
    PyTorchTrialController._set_random_seeds(env.trial_seed)
@staticmethod
def _set_random_seeds(seed: int) -> None:
    """Seed Python, NumPy and torch RNGs with the same trial seed."""
    # Set identical random seeds on all training processes.
    # When using horovod, each worker will start at a unique
    # offset in the dataset, ensuring it's processing a unique
    # training batch.
    random.seed(seed)
    np.random.seed(seed)
    torch.random.manual_seed(seed)  # type: ignore
    # TODO(Aaron): Add flag to enable determinism.
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
@staticmethod
def from_trial(*args: Any, **kwargs: Any) -> det.TrialController:
    """Factory entry point used by the trial-class code path."""
    return PyTorchTrialController(*args, **kwargs)
@staticmethod
def from_native(*args: Any, **kwargs: Any) -> det.TrialController:
    """Native-API construction is intentionally unsupported here."""
    raise NotImplementedError("PyTorchTrial only supports the Native API")
@staticmethod
def support_determined_native() -> bool:
    """This controller supports Determined's native execution mode."""
    return True
def _init_device(self) -> None:
    """Select the torch device for this process: one GPU per horovod
    worker, the first GPU for single-process GPU training, else CPU."""
    self.n_gpus = len(self.env.container_gpus)
    if self.hvd_config.use:
        check.gt(self.n_gpus, 0)
        # We launch a horovod process per GPU. Each process
        # needs to bind to a unique GPU.
        self.device = torch.device(hvd.local_rank())
        torch.cuda.set_device(self.device)
    elif self.n_gpus > 0:
        self.device = torch.device("cuda", 0)
    else:
        self.device = torch.device("cpu")
    check.is_not_none(self.device)
def _init_model_and_optimizer(self) -> None:
    """Build the user's model and optimizer, move the model to the
    chosen device, and wrap for distributed or DataParallel training.

    With horovod enabled the optimizer is wrapped in a
    ``hvd.DistributedOptimizer``; with multiple local GPUs and no
    horovod the model is wrapped in ``nn.DataParallel`` (which requires
    aggregation frequency 1).
    """
    self.context.model = self.trial.build_model()
    # TODO: Check that optimizer is not an amp optimizer.
    self.context.optimizer = self.trial.optimizer(self.context.model)
    self._init_device()
    self.context.model = self.context.model.to(self.device)
    if self.hvd_config.use:
        use_compression = self.hvd_config.fp16_compression
        self.context.optimizer = hvd.DistributedOptimizer(
            self.context.optimizer,
            named_parameters=self.context.model.named_parameters(),
            backward_passes_per_step=self.hvd_config.aggregation_frequency,
            compression=hvd.Compression.fp16 if use_compression else hvd.Compression.none,
        )
        logging.debug("Initialized optimizer for distributed and optimized parallel training.")
    elif self.n_gpus > 1:
        check.eq(
            self.hvd_config.aggregation_frequency,
            1,
            "Please enable `optimized_parallel` to use aggregation "
            "frequency greater than 1 for single machine multi-GPU "
            "training.",
        )
        self.context.model = nn.DataParallel(self.context.model)
        # BUG FIX: log message said "Initialized mode".
        logging.debug("Initialized model for native parallel training.")
def _check_evaluate_implementation(self) -> None:
    """
    Check if the user has implemented evaluate_batch
    or evaluate_full_dataset.

    Exactly one of the two must be overridden; anything else is a
    configuration error.
    """
    logging.debug(f"Evaluate_batch_defined: {self._evaluate_batch_defined()}.")
    logging.debug(f"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.")
    check.not_eq(
        self._evaluate_batch_defined(),
        self._evaluate_full_dataset_defined(),
        # BUG FIX: message previously read "... is recommended is recommended ...".
        "Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. "
        "For most use cases `evaluate_batch()` is recommended because "
        "it can be parallelized across all devices.",
    )
def _get_amp_setting(self) -> str:
    """Return the apex AMP opt level (e.g. "O0") from the experiment's
    `optimizations.mixed_precision` config, rejecting the legacy
    hyperparameter location."""
    amp_setting = self.env.experiment_config.get("optimizations", {}).get(
        "mixed_precision", None
    )
    check.is_not_none(amp_setting)
    check.not_in(
        "amp",
        self.env.hparams,
        "Please move `amp` setting from `hyperparameters` "
        "to `optimizations[`mixed_precision`]`.",
    )
    return cast(str, amp_setting)
def use_amp(self) -> bool:
    """True when mixed precision is enabled (opt level other than "O0")."""
    return self._get_amp_setting() != "O0"
def _configure_amp(self) -> None:
    """Wrap model and optimizer with apex.amp when mixed precision is on.

    Enforces the supported configurations: aggregation frequency 1 when
    using horovod, CUDA available, and no single-machine multi-GPU
    training without `optimized_parallel`.
    """
    if self.use_amp():
        if self.hvd_config.use:
            check.eq(
                self.hvd_config.aggregation_frequency,
                1,
                "Mixed precision training (AMP) is not supported with "
                "aggregation frequency > 1.",
            )
        check.true(
            torch.cuda.is_available(),
            "Mixed precision training (AMP) is supported only on GPU slots.",
        )
        check.false(
            not self.hvd_config.use and self.n_gpus > 1,
            "To enable mixed precision training (AMP) for parallel training, "
            'please set `resources["optimized_parallel"] = True`.',
        )
        logging.info(
            f"Enabling mixed precision training with opt_level: {self._get_amp_setting()}."
        )
        # Verbose apex output only on the chief worker or when debugging.
        self.context.model, self.context.optimizer = apex.amp.initialize(
            self.context.model,
            self.context.optimizer,
            opt_level=self._get_amp_setting(),
            verbosity=1 if self.is_chief or self.env.experiment_config.debug_enabled() else 0,
        )
def _evaluate_batch_defined(self) -> bool:
    """True when the user's trial overrides evaluate_batch()."""
    return util.is_overridden(self.trial.evaluate_batch, PyTorchTrial)
def _evaluate_full_dataset_defined(self) -> bool:
    """True when the user's trial overrides evaluate_full_dataset()."""
    return util.is_overridden(self.trial.evaluate_full_dataset, PyTorchTrial)
@staticmethod
def supports_multi_gpu_training() -> bool:
    """Capability flag: multi-GPU training is supported."""
    return True
@staticmethod
def supports_mixed_precision() -> bool:
    """Capability flag: mixed precision (AMP) is supported."""
    return True
@staticmethod
def supports_averaging_training_metrics() -> bool:
    """Capability flag: cross-process training-metric averaging works."""
    return True
def _set_data_loaders(self) -> None:
    """Create training/validation data loaders from the user's trial.

    Training data is sharded across horovod replicas and skips batches
    already consumed by earlier steps.  With evaluate_batch() every
    replica gets a validation shard; with evaluate_full_dataset() only
    the chief builds an (unsharded) validation loader.
    """
    skip_batches = (self.env.first_step() - 1) * self.batches_per_step
    nreplicas = hvd.size() if self.hvd_config.use else 1
    rank = hvd.rank() if self.hvd_config.use else 0
    self.training_loader = self.trial.build_training_data_loader().get_data_loader(
        repeat=True, skip=skip_batches, num_replicas=nreplicas, rank=rank
    )
    validation_dataset = self.trial.build_validation_data_loader()
    if self._evaluate_batch_defined():
        self.validation_loader = validation_dataset.get_data_loader(
            repeat=False, skip=0, num_replicas=nreplicas, rank=rank
        )
    elif self.is_chief:
        self.validation_loader = validation_dataset.get_data_loader(
            repeat=False, skip=0, num_replicas=1, rank=0
        )
def run(self) -> None:
    """Main workload loop.

    Consumes workloads from the master and dispatches them:
    RUN_STEP trains for the requested number of batches,
    COMPUTE_VALIDATION_METRICS runs validation, CHECKPOINT_MODEL saves
    to the given path, and TERMINATE ends the loop.  Results are
    delivered through each workload's response_func.
    """
    for w, args, response_func in self.workloads:
        if w.kind == workload.Workload.Kind.RUN_STEP:
            check.eq(len(args), 1)
            num_batches = cast(int, args[0])
            response_func(
                util.wrap_metrics(
                    self._train_for_step(w.step_id, num_batches),
                    self.context.get_stop_requested(),
                )
            )
        elif w.kind == workload.Workload.Kind.COMPUTE_VALIDATION_METRICS:
            response_func(
                util.wrap_metrics(
                    self._compute_validation_metrics(), self.context.get_stop_requested()
                )
            )
        elif w.kind == workload.Workload.Kind.CHECKPOINT_MODEL:
            check.eq(len(args), 1)
            check.is_instance(args[0], pathlib.Path)
            path = cast(pathlib.Path, args[0])
            response_func(self._save(path))
        elif w.kind == workload.Workload.Kind.TERMINATE:
            # Non-chief workers report a skipped workload on shutdown.
            response_func({} if self.is_chief else workload.Skipped())
            break
        else:
            raise AssertionError("Unexpected workload: {}".format(w.kind))
def get_epoch_idx(self, batch_id: int) -> int:
    """Map a global batch index to a 0-based epoch index, assuming one
    epoch equals one pass over the training loader."""
    return batch_id // len(self.training_loader)
def _to_device(self, data: _Data) -> TorchData:
    """Move a batch to the configured device.

    NOTE(review): the current value of the "already warned" flag is
    passed in, but the flag is never updated here — presumably
    to_device() uses it to suppress repeated warnings; confirm upstream.
    """
    return to_device(
        data, self.device, self.warning_logged[_WarningLogs.FAILED_MOVING_TO_DEVICE]
    )
@staticmethod
def _average_gradients(parameters: Any, divisor: int) -> None:
    """Divide accumulated gradients of *parameters* by *divisor* in place.

    A divisor of 1 is a no-op; values below 1 are rejected.
    """
    check.gt_eq(divisor, 1)
    if divisor == 1:
        return
    divisor_value = float(divisor)
    for param in parameters:
        if param.grad is None:
            continue
        param.grad.data.div_(divisor_value)
def _average_training_metrics(
    self, per_batch_metrics: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
    """Average training metrics across GPUs.

    Gathers per-batch metric time series from all horovod workers and,
    on the chief, replaces each batch's value with the mean over
    processes (ignoring None entries).  Non-chief workers return their
    input unchanged.
    """
    check.true(self.hvd_config.use, "Can only average training metrics in multi-GPU training.")
    metrics_timeseries = util._list_to_dict(per_batch_metrics)
    # combined_timeseries is: dict[metric_name] -> 2d-array.
    # A measurement is accessed via combined_timeseries[metric_name][process_idx][batch_idx].
    combined_timeseries, _ = self._combine_metrics_across_processes(
        metrics_timeseries, num_batches=len(per_batch_metrics)
    )
    # If the value for a metric is a single-element array, the averaging process will
    # change that into just the element. We record what metrics are single-element arrays
    # so we can wrap them in an array later (for perfect compatibility with non-averaging
    # codepath).
    array_metrics = []
    for metric_name in per_batch_metrics[0].keys():
        if isinstance(per_batch_metrics[0][metric_name], np.ndarray):
            array_metrics.append(metric_name)
    if self.is_chief:
        combined_timeseries_type = Dict[str, List[List[Any]]]
        combined_timeseries = cast(combined_timeseries_type, combined_timeseries)
        num_batches = len(per_batch_metrics)
        num_processes = hvd.size()
        averaged_metrics_timeseries = {}  # type: Dict[str, List]
        for metric_name in combined_timeseries.keys():
            averaged_metrics_timeseries[metric_name] = []
            for batch_idx in range(num_batches):
                batch = [
                    combined_timeseries[metric_name][process_idx][batch_idx]
                    for process_idx in range(num_processes)
                ]
                np_batch = np.array(batch)
                # Drop None entries before averaging; `!= None` is the
                # elementwise numpy comparison, deliberately not `is`.
                batch_avg = np.mean(np_batch[np_batch != None])  # noqa: E711
                if metric_name in array_metrics:
                    # Re-wrap scalars that started life as arrays.
                    batch_avg = np.array(batch_avg)
                averaged_metrics_timeseries[metric_name].append(batch_avg)
        per_batch_metrics = util._dict_to_list(averaged_metrics_timeseries)
    return per_batch_metrics
def _auto_step_lr_scheduler_per_batch(self, batch_idx: int, lr_scheduler: LRScheduler) -> None:
    """
    This function aims at automatically step a LR scheduler. It should be called per batch.

    STEP_EVERY_BATCH steps unconditionally; STEP_EVERY_EPOCH steps when
    the batch index crosses an epoch boundary (the `< aggregation_frequency`
    window tolerates batches consumed in groups when aggregating).
    """
    if lr_scheduler._step_mode == LRScheduler.StepMode.STEP_EVERY_BATCH:
        lr_scheduler.step()
    elif lr_scheduler._step_mode == LRScheduler.StepMode.STEP_EVERY_EPOCH:
        mod = (batch_idx + 1) % len(self.training_loader)
        if mod == 0 or mod < self.hvd_config.aggregation_frequency:
            lr_scheduler.step()
def _train_for_step(self, step_id: int, batches_per_step: int) -> workload.Response:
check.gt(step_id, 0)
# Set the behavior of certain layers (e.g., dropout) that are different
# between training and inference.
self.context.model.train()
for callback in self.callbacks.values():
callback.on_train_step_start(step_id)
step_idx = step_id - 1
start = step_idx * batches_per_step
end = start + batches_per_step
per_batch_metrics = [] # type: List[Dict]
num_inputs = 0
for batch_idx in range(start, end):
batch = next(self.training_iterator)
num_inputs += data_length(batch)
batch = self._to_device(batch)
# Forward pass.
tr_metrics = self.trial.train_batch(
batch=batch,
model=self.context.model,
epoch_idx=self.get_epoch_idx(batch_idx),
batch_idx=batch_idx,
)
if isinstance(tr_metrics, torch.Tensor):
tr_metrics = {"loss": tr_metrics}
check.is_instance(
tr_metrics,
dict,
"train_batch() must return a dictionary "
"mapping string names to Tensor metrics, got {type(tr_metrics)}",
)
check.is_in("loss", tr_metrics.keys(), 'Please include "loss" in you training metrics.')
# Backwards pass.
loss = tr_metrics["loss"]
communicate_and_update = (batch_idx + 1) % self.hvd_config.aggregation_frequency == 0
if self.use_amp():
with apex.amp.scale_loss(loss, self.context.optimizer) as scaled_loss:
scaled_loss.backward()
if self.hvd_config.use and communicate_and_update:
# When using horovod, we need to finish communicating gradient
# updates before they are unscaled which happens when we exit
# of this context manager.
self.context.optimizer.synchronize()
else:
loss.backward()
# Communication needs to be synchronized so that is completed
# before we apply gradient clipping and `step()`.
if communicate_and_update and self.hvd_config.use:
self.context.optimizer.synchronize()
if communicate_and_update:
parameters = (
self.context.model.parameters()
if not self.use_amp()
else apex.amp.master_params(self.context.optimizer)
)
if self.hvd_config.average_aggregated_gradients:
self._average_gradients(
parameters=parameters, divisor=self.hvd_config.aggregation_frequency
)
# TODO: Remove this check in v0.12.8.
check.false(
self.env.hparams.get("clip_grad_l2_norm", None)
or self.env.hparams.get("clip_grad_val", None),
"Please specify gradient clipping via callbacks.",
)
for callback in self.callbacks.values():
callback.on_before_optimizer_step(parameters)
if self.hvd_config.use:
with self.context.optimizer.skip_synchronize():
self.context.optimizer.step()
else:
self.context.optimizer.step()
self.context.optimizer.zero_grad()
# Step learning rate of a LRScheduler.
if self.context.lr_scheduler is not None:
self._auto_step_lr_scheduler_per_batch(batch_idx, self.context.lr_scheduler)
for name, | |
(?,?)", (self.mcqs, self.email))
cur.execute(f"SELECT QDes FROM CalculusOb WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
else:
print("yes")
cur.execute("CREATE TABLE IF NOT EXISTS CalculusSub (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO CalculusSub (QDes, Email) VALUES (?,?)", (tex, self.email))
cur.execute(f"SELECT QDes FROM CalculusSub WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
elif self.selected == 'Probability':
self.cc = self.chep.text()
print('h')
if self.cc == '2':
print('h')
self.t = self.optionedit6.text()
self.f = self.optionedit7.text()
self.mcqs = tex + self.t + self.f
cur.execute("CREATE TABLE IF NOT EXISTS ProbabilityOb (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO ProbabilityOb (QDes, Email) VALUES (?,?)", (self.mcqs, self.email))
cur.execute(f"SELECT QDes FROM ProbabilityOb WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
elif self.cc == '3':
print('h')
self.one = self.optionedit1.toPlainText()
self.two = self.optionedit2.toPlainText()
self.three = self.optionedit3.toPlainText()
self.mcqs = tex + self.one + self.two + self.three
cur.execute("CREATE TABLE IF NOT EXISTS ProbabilityOb (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO ProbabilityOb (QDes, Email) VALUES (?,?)", (self.mcqs, self.email))
cur.execute(f"SELECT QDes FROM ProbabilityOb WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
elif self.cc == '4':
self.one = self.optionedit1.toPlainText()
self.two = self.optionedit2.toPlainText()
self.three = self.optionedit3.toPlainText()
self.four = self.optionedit4.toPlainText()
self.mcqs = tex + self.one + self.two + self.three + self.four
cur.execute("CREATE TABLE IF NOT EXISTS ProbabilityOb (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO ProbabilityOb (QDes, Email) VALUES (?,?)", (self.mcqs, self.email))
cur.execute(f"SELECT QDes FROM ProbabilityOb WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
elif self.cc == '5':
self.one = self.optionedit1.toPlainText()
self.two = self.optionedit2.toPlainText()
self.three = self.optionedit3.toPlainText()
self.four = self.optionedit4.toPlainText()
self.five = self.optionedit5.toPlainText()
self.mcqs = tex + self.one + self.two + self.three + self.four + self.five
cur.execute("CREATE TABLE IF NOT EXISTS ProbabilityOb (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO ProbabilityOb (QDes, Email) VALUES (?,?)", (self.mcqs, self.email))
cur.execute(f"SELECT QDes FROM ProbabilityOb WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
else:
print("yes")
cur.execute("CREATE TABLE IF NOT EXISTS ProbabilitySub (QDes TEXT NOT NULL, Email TEXT NOT NULL, FOREIGN KEY (Email) REFERENCES user (Email))")
cur.execute("INSERT INTO ProbabilitySub (QDes, Email) VALUES (?,?)", (tex, self.email))
cur.execute(f"SELECT QDes FROM ProbabilitySub WHERE Email = '{self.email}';")
result = cur.fetchall()
print(result)
conn.commit()
conn.close()
@qtc.pyqtSlot(str)
def updatefield(self, word):
self.word = word
self.cursor = self.editor.textCursor()
self.cursor.insertText(self.word)
@qtc.pyqtSlot(str, str)
def update(self, po, ba):
self.po = po
self.ba = ba
self.cursor = self.editor.textCursor()
self.cursor.insertText(self.po)
self.cursor.insertText(self.ba)
@qtc.pyqtSlot(str)
def matrixupdate(self, arry):
self.arry = arry
self.cursor = self.editor.textCursor()
self.cursor.insertText(self.arry)
@qtc.pyqtSlot(str)
def squpdate(self, s):
self.s = s
#self.sqaures = sqrt(self.s)
print("hello")
#self.q = ["$\frac{a}{b}$"]
#self.q = r'\overline{b}^3'
self.q = r"$"+ "frac{a}{b}" + r"$"
#self.label =u'\u222B'
print(self.q) #\sqrt{self.s}
self.cursor = self.editor.textCursor()
self.cursor.insertText(self.q)
#self.cursor.setMarkdown(self.q)
def create_menu_bar(self):
menuBar = QMenuBar(self)
""" add elements to the menubar """
# App icon will go here
app_icon = menuBar.addMenu(QIcon("doc_icon.png"), "icon")
# file menu **
file_menu = QMenu("File", self)
menuBar.addMenu(file_menu)
save_action = QAction('Save', self)
save_action.triggered.connect(self.file_save)
file_menu.addAction(save_action)
open_action = QAction('Open', self)
open_action.triggered.connect(self.file_open)
file_menu.addAction(open_action)
rename_action = QAction('Rename', self)
rename_action.triggered.connect(self.file_saveas)
file_menu.addAction(rename_action)
pdf_action = QAction("Save as PDF", self)
pdf_action.triggered.connect(self.save_pdf)
file_menu.addAction(pdf_action)
# edit menu **
edit_menu = QMenu("Edit", self)
menuBar.addMenu(edit_menu)
# paste
paste_action = QAction('Paste', self)
paste_action.triggered.connect(self.editor.paste)
edit_menu.addAction(paste_action)
# clear
clear_action = QAction('Clear', self)
clear_action.triggered.connect(self.editor.clear)
edit_menu.addAction(clear_action)
# select all
select_action = QAction('Select All', self)
select_action.triggered.connect(self.editor.selectAll)
edit_menu.addAction(select_action)
# view menu **
view_menu = QMenu("View", self)
menuBar.addMenu(view_menu)
# fullscreen
fullscr_action = QAction('Full Screen View', self)
fullscr_action.triggered.connect(lambda : self.showFullScreen())
view_menu.addAction(fullscr_action)
# normal screen
normscr_action = QAction('Normal View', self)
normscr_action.triggered.connect(lambda : self.showNormal())
view_menu.addAction(normscr_action)
# minimize
minscr_action = QAction('Minimize', self)
minscr_action.triggered.connect(lambda : self.showMinimized())
view_menu.addAction(minscr_action)
self.setMenuBar(menuBar)
def create_toolbar(self):
# Using a title
ToolBar = QToolBar("Tools", self)
# undo
undo_action = QAction(QIcon("undo.png"), 'Undo', self)
undo_action.triggered.connect(self.editor.undo)
ToolBar.addAction(undo_action)
# redo
redo_action = QAction(QIcon("redo.png"), 'Redo', self)
redo_action.triggered.connect(self.editor.redo)
ToolBar.addAction(redo_action)
# adding separator
ToolBar.addSeparator()
# copy
copy_action = QAction(QIcon("copy.png"), 'Copy', self)
copy_action.triggered.connect(self.editor.copy)
ToolBar.addAction(copy_action)
# cut
cut_action = QAction(QIcon("cut.png"), 'Cut', self)
cut_action.triggered.connect(self.editor.cut)
ToolBar.addAction(cut_action)
# paste
paste_action = QAction(QIcon("paste.png"), 'Paste', self)
paste_action.triggered.connect(self.editor.paste)
ToolBar.addAction(paste_action)
# adding separator
ToolBar.addSeparator()
ToolBar.addSeparator()
# fonts
self.font_combo = QComboBox(self)
self.font_combo.addItems(["Courier Std", "Hellentic Typewriter Regular", "Helvetica", "Arial", "SansSerif", "Helvetica", "Times", "Monospace"])
self.font_combo.activated.connect(self.set_font) # connect with function
ToolBar.addWidget(self.font_combo)
# font size
self.font_size = QSpinBox(self)
self.font_size.setValue(12)
self.font_size.valueChanged.connect(self.set_font_size) # connect with funcion
ToolBar.addWidget(self.font_size)
# separator
ToolBar.addSeparator()
# bold
bold_action = QAction(QIcon("bold.png"), 'Bold', self)
bold_action.triggered.connect(self.bold_text)
ToolBar.addAction(bold_action)
# underline
underline_action = QAction(QIcon("underline.png"), 'Underline', self)
underline_action.triggered.connect(self.underline_text)
ToolBar.addAction(underline_action)
# italic
italic_action = QAction(QIcon("italic.png"), 'Italic', self)
italic_action.triggered.connect(self.italic_text)
ToolBar.addAction(italic_action)
# separator
ToolBar.addSeparator()
# text alignment
right_alignment_action = QAction(QIcon("alignright.png"), 'Align Right', self)
right_alignment_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignRight))
ToolBar.addAction(right_alignment_action)
left_alignment_action = QAction(QIcon("alignleft.png"), 'Align Left', self)
left_alignment_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignLeft))
ToolBar.addAction(left_alignment_action)
justification_action = QAction(QIcon("aligncenter.png"), 'Center/Justify', self)
justification_action.triggered.connect(lambda : self.editor.setAlignment(Qt.AlignCenter))
ToolBar.addAction(justification_action)
ToolBar.addSeparator()
subAction = QAction(QIcon("icons/subscript.png"),"Subscript",self)
subAction.triggered.connect(self.subScript)
ToolBar.addAction(subAction)
superAction = QAction(QIcon("icons/superscript.png"),"Superscript",self)
superAction.triggered.connect(self.superScript)
ToolBar.addAction(superAction)
overLineAction = QAction(QIcon("icons/superscript.png"),"OverLine",self)
overLineAction.triggered.connect(self.overLine)
ToolBar.addAction(overLineAction)
# separator
# zoom in
#zoom_in_action = QAction(QIcon("zoom-in.png"), 'Zoom in', self)
#zoom_in_action.triggered.connect(self.editor.zoomIn)
#ToolBar.addAction(zoom_in_action)
# zoom out
#zoom_out_action = QAction(QIcon("zoom-out.png"), 'Zoom out', self)
#zoom_out_action.triggered.connect(self.editor.zoomOut)
#ToolBar.addAction(zoom_out_action)
# separator
ToolBar.addSeparator()
self.addToolBar(ToolBar)
def strike(self):
# Grab the text's format
fmt = self.editor.currentCharFormat()
# Set the fontStrikeOut property to its opposite
fmt.setFontStrikeOut(not fmt.fontStrikeOut())
#fmt.setFontUnderline(fmt.fontUnderline())
# And set the next char format
self.editor.setCurrentCharFormat(fmt)
def overLine(self):
# Grab the current format
fmt = self.editor.currentCharFormat()
# And get the vertical alignment property
#align = fmt.verticalAlignment()
align = fmt.fontOverline()
# Toggle the state
if align == QTextCharFormat.AlignNormal:
#fmt.setVerticalAlignment(QTextCharFormat.fontUnderline)
#fmt.setFontUnderline(QTextCharFormat.fontUnderline)
fmt.setFontOverline(True)
else:
fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)
# Set the new format
self.editor.setCurrentCharFormat(fmt)
def subScript(self):
# Grab the current format
fmt = self.editor.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QTextCharFormat.AlignSubScript)
else:
fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)
# Set the new format
self.editor.setCurrentCharFormat(fmt)
def superScript(self):
# Grab the current format
fmt = self.editor.currentCharFormat()
# And get the vertical alignment property
align = fmt.verticalAlignment()
# Toggle the state
if align == QTextCharFormat.AlignNormal:
fmt.setVerticalAlignment(QTextCharFormat.AlignSuperScript)
else:
fmt.setVerticalAlignment(QTextCharFormat.AlignNormal)
# Set the new format
self.editor.setCurrentCharFormat(fmt)
def italic_text(self):
# if already italic, change into normal, else italic
state = self.editor.fontItalic()
self.editor.setFontItalic(not(state))
def underline_text(self):
# if already underlined, change into normal, else underlined
state = self.editor.fontUnderline()
self.editor.setFontUnderline(not(state))
def bold_text(self):
if self.editor.fontWeight() != QFont.Bold:
self.editor.setFontWeight(QFont.Bold)
def set_font(self):
font = self.font_combo.currentText()
self.editor.setCurrentFont(QFont(font))
def set_font_size(self):
value = self.font_size.value()
self.editor.setFontPointSize(value)
# we can also make it one liner without writing such function.
# by using lamba function -
# self.font_size.valueChanged.connect(self.editor.setFontPointSize(self.font_size.value()))
def file_open(self):
self.path, _ = QFileDialog.getOpenFileName(self, "Open file", "", "Text documents (*.text);Text documents (*.txt);All files (*.*)")
try:
#with open(self.path, 'r') as f:
# text = f.read()
text = docx2txt.process(self.path) # docx2txt
#doc = Document(self.path) # if using docx
#text = ''
#for line in doc.paragraphs:
# text += line.text
except Exception as e:
print(e)
else:
self.editor.setText(text)
self.update_title()
def file_save(self):
print(self.path)
if self.path == '':
# If we do not have a path, we need to use Save As.
self.file_saveas()
text = self.editor.toPlainText()
try:
with open(self.path, 'w') as f:
f.write(text)
self.update_title()
except Exception as e:
print(e)
def file_saveas(self):
self.path, _ = QFileDialog.getSaveFileName(self, "Save file", "", "text documents (*.text);Text documents (*.txt);All files (*.*)")
if self.path == '':
return # If dialog is cancelled, will return ''
text = self.editor.toPlainText()
try:
with open(path, 'w') as f:
f.write(text)
self.update_title()
except Exception as e:
print(e)
def update_title(self):
self.setWindowTitle(self.title + ' ' + self.path)
def save_pdf(self):
f_name, _ = QFileDialog.getSaveFileName(self, "Export PDF", None, "PDF files (.pdf);;All files()")
print(f_name)
if f_name != '': # if name not empty
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(f_name)
self.editor.document().print_(printer)
###################################################################
###################################################################
class Ui_keyboard(MainApp):
submitted = qtc.pyqtSignal(str)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 400)
#MainWindow.setWindowFlag(QtCore.Qt.FramelessWindowHint)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setContentsMargins(50, -1, -1, -1)
self.verticalLayout.setObjectName("verticalLayout")
self.pushButton_36 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setMaximumSize(QtCore.QSize(50, 50))
#self.pushButton_2.setIcon(QIcon("image : url(cal1.png);"))
self.pushButton.setStyleSheet("background-color:white;\n"
"image : url(cal1.png);")
self.pushButton.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy)
self.pushButton_2.setMaximumSize(QtCore.QSize(50, 50))
self.pushButton_2.setStyleSheet("background-color:white;\n"
"image : url(cal2.png);")
self.pushButton_2.setObjectName("pushButton_26")
self.gridLayout.addWidget(self.pushButton_2, 1, 1, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
| |
template_name = 'agile_board.html'
def get(self, request, **kwargs):
if request.method == 'GET':
return render(request, self.template_name, context = None)
def _template_view(name, template):
    """Create a TemplateView subclass that renders *template* on GET.

    Replaces a long run of copy-pasted view classes that differed only in
    their template name.  The per-class ``get()`` used to re-check
    ``request.method == 'GET'``, but Django's ``dispatch()`` only routes GET
    requests to ``get()``, so the check (and its implicit-None fall-through)
    is dropped.
    """
    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)

    return type(name, (TemplateView,), {
        '__doc__': "Render the '%s' template." % template,
        'template_name': template,
        'get': get,
    })


timeline_2 = _template_view('timeline_2', 'timeline_2.html')
diff = _template_view('diff', 'diff.html')
pdf_viewer = _template_view('pdf_viewer', 'pdf_viewer.html')
i18support = _template_view('i18support', 'i18support.html')
sweetalert = _template_view('sweetalert', 'sweetalert.html')
idle_timer = _template_view('idle_timer', 'idle_timer.html')
truncate = _template_view('truncate', 'truncate.html')
password_meter = _template_view('password_meter', 'password_meter.html')
spinners = _template_view('spinners', 'spinners.html')
spinners_usage = _template_view('spinners_usage', 'spinners_usage.html')
tinycon = _template_view('tinycon', 'tinycon.html')
google_maps = _template_view('google_maps', 'google_maps.html')
datamaps = _template_view('datamaps', 'datamaps.html')
social_buttons = _template_view('social_buttons', 'social_buttons.html')
code_editor = _template_view('code_editor', 'code_editor.html')
modal_window = _template_view('modal_window', 'modal_window.html')
clipboard = _template_view('clipboard', 'clipboard.html')
text_spinners = _template_view('text_spinners', 'text_spinners.html')
forum_main = _template_view('forum_main', 'forum_main.html')
validation = _template_view('validation', 'validation.html')
tree_view = _template_view('tree_view', 'tree_view.html')
loading_buttons = _template_view('loading_buttons', 'loading_buttons.html')
chat_view = _template_view('chat_view', 'chat_view.html')
masonry = _template_view('masonry', 'masonry.html')
tour = _template_view('tour', 'tour.html')
typography = _template_view('typography', 'typography.html')
icons = _template_view('icons', 'icons.html')
draggable_panels = _template_view('draggable_panels', 'draggable_panels.html')
buttons = _template_view('buttons', 'buttons.html')
video = _template_view('video', 'video.html')
tabs_panels = _template_view('tabs_panels', 'tabs_panels.html')
tabs = _template_view('tabs', 'tabs.html')
notifications = _template_view('notifications', 'notifications.html')
helper_classes = _template_view('helper_classes', 'helper_classes.html')
badges_labels = _template_view('badges_labels', 'badges_labels.html')
grid_options = _template_view('grid_options', 'grid_options.html')
table_basic = _template_view('table_basic', 'table_basic.html')
'''
Database interaction section
--------------------------------------------------------
Provides export of database objects
in the following formats:
CSV
JSON
EXCEL
'''
from .models import cactea
import django_tables2 as tables
from django_tables2.export.views import ExportMixin
from .tables import CacteaTable, CacResource
from .filters import TableFilter
from django.template import RequestContext
class table_data_tables(TemplateView):
    """Render all cactea rows as a django-tables2 table."""

    template_name = 'table_data_tables.html'

    def get(self, request, **kwargs):
        """Build a CacteaTable from all cactea records and render it."""
        rows = cactea.objects.all()
        table = CacteaTable(rows, order_by_field='sort')
        # Pass the django-tables2 Table object — the previous version built
        # `table` and then passed the raw queryset to the template instead.
        return render(request, self.template_name, {'table': table})
def _export_response(fmt, content_type):
    """Serialize the whole cactea table as *fmt* and wrap it as a download.

    Shared by the csv/json/xls export views below, which previously repeated
    this body verbatim.
    """
    dataset = CacResource().export()
    response = HttpResponse(getattr(dataset, fmt), content_type=content_type)
    response['Content-Disposition'] = f'attachment; filename="cactea.{fmt}"'
    return response


class export_csv(TemplateView):
    """Download the cactea table as CSV."""

    def get(self, request, **kwargs):
        return _export_response('csv', 'text/csv')


class export_json(TemplateView):
    """Download the cactea table as JSON."""

    def get(self, request, **kwargs):
        return _export_response('json', 'application/json')


class export_excel(TemplateView):
    """Download the cactea table as Excel (.xls)."""

    def get(self, request, **kwargs):
        return _export_response('xls', 'application/vnd.ms-excel')
#Serialization junction
from .serializers import CacteaSerializer
from rest_framework import viewsets
class CacteaViewSet(viewsets.ModelViewSet):
    """REST endpoint exposing CRUD operations on cactea records."""

    serializer_class = CacteaSerializer
    queryset = cactea.objects.all()
def populate_database(csv_path='D:\\FREELANCER\\DjangoProjectGamma\\DIRECTORY\\CACTEA\\data\\new_storage.csv'):
    """Load storage-inventory rows from *csv_path* into the cactea table.

    Every CSV column maps to the model field of the same name, except the
    'id' column which maps to the id_IT field.  The path is now a parameter
    (defaulting to the original hard-coded location) so the loader is usable
    outside that one machine.
    """
    import csv
    from .models import cactea

    # Fields whose model name equals the CSV header name.
    same_name_fields = [
        'SERIAL_NUMBER', 'MANUFACTURER', 'NAME', 'MODEL', 'LOCATION', 'ROOM',
        'ENERGY_CONSUMPTION', 'BTU', 'MICROCODE', 'PATCH_LEVEL', 'CUSTOMER_ID',
        'HOSTS_APPLICATION', 'TCPADDR1', 'TCPADDR2', 'TCPADDR3', 'SW_GUI',
        'INVESTMENT_DATE', 'MAINTENANCE_EXPIRATION_DATE', 'MAINTENANCE_PROVIDER',
        'MAINTENANCE_CONTRACT', 'IS_EXTENSION_REQUIRED', 'IS_MIGRATION_REQUIRED',
        'RAID_CONFIG', 'USABLE_CAPACITY_GB', 'CAPACITY_IN_USE_GB',
        'FREE_CAPACITY_GB', 'EXTENSION', 'COMMENT_1', 'STORAGE_TYPE',
        'READADMIN', 'RACK', 'COST_CENTER', 'CO2_KG', 'SECOND_INVESTMENT_DATE',
    ]
    with open(csv_path) as csvfile:
        for row in csv.DictReader(csvfile):
            kwargs = {name: row[name] for name in same_name_fields}
            kwargs['id_IT'] = row['id']
            cactea(**kwargs).save()
populate_database()
class table_foo_table(TemplateView):
    """FooTable demo view.

    NOTE(review): this renders 'table_data_tables.html', not a
    footable-specific template — confirm the template name is intentional.
    """

    template_name = 'table_data_tables.html'

    def get(self, request, **kwargs):
        if request.method != 'GET':
            return None
        return render(request, self.template_name, context=None)
class jq_grid(TemplateView):
    """Render the jqGrid demo page."""

    template_name = 'jq_grid.html'

    def get(self, request, **kwargs):
        # dispatch() only routes GET requests here; the old per-class
        # `request.method == 'GET'` check was always true and is dropped
        # (likewise in the classes below).
        return render(request, self.template_name, context=None)


class ecommerce_products_grid(TemplateView):
    """Render the e-commerce products grid page."""

    template_name = 'ecommerce_products_grid.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_product_list(TemplateView):
    """Render the e-commerce product list page."""

    template_name = 'ecommerce_product_list.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_product(TemplateView):
    """Render the e-commerce single product page."""

    template_name = 'ecommerce_product.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_product_detail(TemplateView):
    """Render the e-commerce product detail page."""

    template_name = 'ecommerce_product_detail.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_cart(TemplateView):
    """Render the e-commerce cart page."""

    # Note the hyphenated template file name (kept from the original).
    template_name = 'ecommerce-cart.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_orders(TemplateView):
    """Render the e-commerce orders page."""

    template_name = 'ecommerce-orders.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)


class ecommerce_payments(TemplateView):
    """Render the e-commerce payments page."""

    template_name = 'ecommerce_payments.html'

    def get(self, request, **kwargs):
        return render(request, self.template_name, context=None)
class basic_gallery(TemplateView):
"""docstring for basic_gallery."""
template_name = 'basic_gallery.html'
def get(self, request, **kwargs):
if | |
self.decompose_nuktas:
# decomposing Nukta based composite characters
text=text.replace('\u09dc','\u09a1'+BengaliNormalizer.NUKTA)
text=text.replace('\u09dd','\u09a2'+BengaliNormalizer.NUKTA)
text=text.replace('\u09df','\u09af'+BengaliNormalizer.NUKTA)
else:
# recomposing Nukta based composite characters
text=text.replace('\u09a1'+BengaliNormalizer.NUKTA, '\u09dc')
text=text.replace('\u09a2'+BengaliNormalizer.NUKTA, '\u09dd')
text=text.replace('\u09af'+BengaliNormalizer.NUKTA, '\u09df')
if self.lang=='as':
if self.do_remap_assamese_chars:
# Normalize Assamese chars to Bengali
text=text.replace('\u09f0','\u09b0') # 'ra' character
text=text.replace('\u09f1','\u09ac') # 'va' character to 'ba'
else:
text=text.replace('\u09b0','\u09f0') # 'ra' character
if self.do_canonicalize_khanda_ta:
text=text.replace('\u09ce','\u09a4\u09cd') # ৎ -> ত্
# replace the poorna virama codes specific to script
# with generic Indic script codes
text=text.replace('\u09e4','\u0964')
text=text.replace('\u09e5','\u0965')
# replace pipe character for poorna virama
text=text.replace('\u007c','\u0964')
# replace bengali currency numerator four for poorna virama (it looks similar and is used as a substitute)
text=text.replace('\u09f7','\u0964')
# two part dependent vowels
text=text.replace('\u09c7\u09be','\u09cb')
text=text.replace('\u09c7\u09d7','\u09cc')
if self.do_colon_to_visarga: # correct visarge
text=re.sub(r'([\u0980-\u09ff]):','\\1\u0983',text)
return text
class TamilNormalizer(BaseNormalizer):
    """
    Normalizer for the Tamil script. In addition to basic normalization by the super class:

    * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama
    * canonicalize two-part dependent vowel signs
    * replace colon ':' by visarga if the colon follows a character in this script

    Extra options:
    * ``normalize_grantha`` folds grantha consonants into core Tamil letters
    * ``do_convert_to_reformed_vowels`` rewrites independent ai/au vowels
    """

    def __init__(self,lang='ta',remove_nuktas=False,decompose_nuktas=False,nasals_mode='do_nothing',
                do_normalize_chandras=False,do_normalize_vowel_ending=False,do_normalize_numerals=False,convert_numerals_to_native=False,do_colon_to_visarga=False,
                normalize_grantha=False,do_convert_to_reformed_vowels=False):
        # All generic flags are forwarded unchanged to BaseNormalizer.
        super(TamilNormalizer,self).__init__(lang,remove_nuktas,decompose_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending,do_normalize_numerals,convert_numerals_to_native,do_colon_to_visarga)
        # Tamil-specific options consumed in normalize() below.
        self.normalize_grantha = normalize_grantha
        self.do_convert_to_reformed_vowels=do_convert_to_reformed_vowels

    def normalize(self,text):
        '''Apply base normalization, then Tamil-specific canonicalizations.

        The replacement order below is deliberate (e.g. grantha folding runs
        after the "Shri" canonicalization) — do not reorder.
        '''
        # common normalization for Indic scripts
        text=super(TamilNormalizer,self).normalize(text)
        # replace the poorna virama codes specific to script
        # with generic Indic script codes
        text=text.replace('\u0be4','\u0964')
        text=text.replace('\u0be5','\u0965')
        # two part dependent vowels -> precomposed codepoints
        text=text.replace('\u0b92\u0bd7','\u0b94')
        text=text.replace('\u0bc6\u0bbe','\u0bca')
        text=text.replace('\u0bc7\u0bbe','\u0bcb')
        text=text.replace('\u0bc6\u0bd7','\u0bcc')
        # ா் -> ர் (Example: வாடிக்கையாளா் -> வாடிக்கையாளர்)
        text=text.replace('\u0bbe\u0bcd','\u0bb0\u0bcd')
        text=text.replace('\u0bca\u0bcd','\u0bc6\u0bb0\u0bcd')
        text=text.replace('\u0bcb\u0bcd','\u0bc7\u0bb0\u0bcd')
        # "Shri" can be written with two different initial letters: ஶ (ie. ஶ்ரீ ʃɾī) or ஸ (ie. ஸ்ரீ s͓ɾī). The result looks identical.
        # Since 2005, the Unicode Consortium has recommended use of the former, but both are still in wide circulation.
        text=text.replace('\u0bb8\u0bcd\u0bb0\u0bc0','\u0bb6\u0bcd\u0bb0\u0bc0')
        # Tamil digits ௧ (1), ௫ (5), ௭ (7) look identical to actual letters
        if not re.search('[\u0be8-\u0bea\u0bec\u0bee-\u0bef]', text):
            # Heuristic: If other digits are not there, assume error
            text=text.replace('\u0be7','\u0b95')
            text=text.replace('\u0beb','\u0bb0\u0bc1')
            text=text.replace('\u0bed','\u0b8e')
            text=text.replace('\u0be6','0')
        text=text.replace('\u0bf9','\u0bb0\u0bc2') # ௹ -> ரூ (Rupee sign)
        if self.remove_nuktas:
            # In Tamil, it's equivalent to removing translingual ஃ
            # like ஃஜ (za), ஃக (qa), ஃப (fa), ஃவ (wa), ஃஸ (xa), ஃஶ (zha)
            text=re.sub('\u0b83([\u0b95-\u0bb9])','\\1',text)
            # In other places, ஃ denotes a voiceless uvular fricative or visarga if final
        if self.normalize_grantha:
            # Convert additional grantha consonants to core Tamil
            text=text.replace('\u0bb6\u0bcd\u0bb0\u0bc0', '\u0ba4\u0bbf\u0bb0\u0bc1') # ஸ்ரீ -> திரு
            text=text.replace('\u0b9c','\u0b9a') # ஜ -> ச
            text=text.replace('\u0bb6','\u0b9a') # ஶ -> ச
            text=text.replace('\u0bb7','\u0b9a') # ஷ -> ச
            text=text.replace('\u0bb8','\u0b9a') # ஸ -> ச
            text=text.replace('\u0bb9','\u0b95') # ஹ -> க
            text=text.replace('\u0b82','\u0bae\u0bcd') # ஂ -> ம்
        if self.do_convert_to_reformed_vowels:
            # Independent vowels
            text=text.replace('\u0b90','\u0b85\u0baf\u0bcd') # ஐ -> அய்
            text=text.replace('\u0b94','\u0b85\u0bb5\u0bcd') # ஔ -> அவ்
            ## TODO: Verify if not necessary for dependent vowels
        # # Correct Indic visarge. Does not apply to Tamil, since visarga is ஃ
        # if self.do_colon_to_visarga:
        #     text=re.sub(r'([\u0b80-\u0bff]):','\\1\u0b83',text)
        return text
class TeluguNormalizer(BaseNormalizer):
    """
    Normalizer for the Telugu script. On top of the base-class normalization:

    * replace the script-specific reserved poorna virama codepoints with the
      generic Indic ones
    * canonicalize the two-part dependent vowel sign
    * optionally rewrite colon ':' as visarga when it follows a Telugu character
    """
    NUKTA='\u0C3C'

    def __init__(self,lang='te',remove_nuktas=False,decompose_nuktas=False,nasals_mode='do_nothing',
                do_normalize_chandras=False,do_normalize_vowel_ending=False,do_normalize_numerals=False,convert_numerals_to_native=False,do_colon_to_visarga=False,
                do_normalize_nakara_pollu=True):
        # Generic flags go straight to BaseNormalizer.
        super(TeluguNormalizer,self).__init__(lang,remove_nuktas,decompose_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending,do_normalize_numerals,convert_numerals_to_native,do_colon_to_visarga)
        # Telugu-only option: rewrite the nakara pollu sign as న్.
        self.do_normalize_nakara_pollu=do_normalize_nakara_pollu

    def normalize(self,text):
        '''Apply base normalization, then Telugu-specific canonicalizations.'''
        text=super(TeluguNormalizer,self).normalize(text)
        if self.decompose_nuktas:
            # Legacy tsa/dza written with a numeral-2 glyph -> consonant + nukta.
            for legacy, decomposed in (('\u0c58','\u0c1a\u0c3c'),   # ౘ -> చ఼
                                       ('\u0c59','\u0c1c\u0c3c')):  # ౙ -> జ఼
                text=text.replace(legacy, decomposed)
        if self.remove_nuktas:
            # Drop the nukta entirely; legacy composites fall back to base letters.
            text=text.replace(TeluguNormalizer.NUKTA,'')
            text=text.replace('\u0c58','\u0c1a') # ౘ -> చ
            text=text.replace('\u0c59','\u0c1c') # ౙ -> జ
        if self.do_normalize_nakara_pollu:
            text=text.replace('\u0c5d','\u0c28\u0c4d') # ౝ -> న్
        # Script-specific poorna virama -> generic Indic codepoints.
        text=text.replace('\u0c64','\u0964')
        text=text.replace('\u0c65','\u0965')
        # Two-part dependent vowel -> precomposed codepoint.
        text=text.replace('\u0c46\u0c56','\u0c48')
        if self.do_colon_to_visarga:
            # A colon after a Telugu character is treated as a visarga.
            text=re.sub(r'([\u0c00-\u0c7f]):','\\1\u0c03',text)
        return text

    def get_char_stats(self,text):
        # Not implemented for Telugu.
        pass
class KannadaNormalizer(BaseNormalizer):
    """
    Normalizer for the Kannada script. On top of the base-class normalization:

    * replace the script-specific reserved poorna virama codepoints with the
      generic Indic ones
    * canonicalize two-part dependent vowel signs
    * optionally rewrite colon ':' as visarga when it follows a Kannada character
    """
    NUKTA='\u0CBC'

    def __init__(self,lang='kn',remove_nuktas=False,decompose_nuktas=False,nasals_mode='do_nothing',
                do_normalize_chandras=False,do_normalize_vowel_ending=False,do_normalize_numerals=False,convert_numerals_to_native=False,do_colon_to_visarga=False):
        # No Kannada-only options; everything is handled by BaseNormalizer.
        super(KannadaNormalizer,self).__init__(lang,remove_nuktas,decompose_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending,do_normalize_numerals,convert_numerals_to_native,do_colon_to_visarga)

    def normalize(self,text):
        '''Apply base normalization, then Kannada-specific canonicalizations.'''
        text=super(KannadaNormalizer,self).normalize(text)
        if self.remove_nuktas:
            text=text.replace(KannadaNormalizer.NUKTA,'')
        # Script-specific poorna virama -> generic Indic codepoints.
        text=text.replace('\u0ce4','\u0964')
        text=text.replace('\u0ce5','\u0965')
        # Two-part dependent vowel signs -> precomposed codepoints.
        # NOTE: order matters — ೆ+ೂ must compose to ೊ before ೊ+ೕ composes to ೋ.
        for two_part, composed in (('\u0cbf\u0cd5','\u0cc0'),
                                   ('\u0cc6\u0cd5','\u0cc7'),
                                   ('\u0cc6\u0cd6','\u0cc8'),
                                   ('\u0cc6\u0cc2','\u0cca'),
                                   ('\u0cca\u0cd5','\u0ccb')):
            text=text.replace(two_part, composed)
        if self.do_colon_to_visarga:
            # A colon after a Kannada character is treated as a visarga.
            text=re.sub(r'([\u0c80-\u0cff]):','\\1\u0c83',text)
        return text
class MalayalamNormalizer(BaseNormalizer):
    """
    Normalizer for the Malayalam script. In addition to basic normalization by the super class:

    * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama
    * canonicalize two-part dependent vowel signs
    * Change from old encoding of chillus (till Unicode 5.0) to new encoding
    * replace colon ':' by visarga if the colon follows a character in this script
    """

    # Chillu letter -> base consonant. The explicit virama is appended
    # separately when canonicalizing. The last three entries are the chillus
    # introduced in Unicode 9.0.
    CHILLU_CHAR_MAP= {
        '\u0d7a': '\u0d23',
        '\u0d7b': '\u0d28',
        '\u0d7c': '\u0d30',
        '\u0d7d': '\u0d32',
        '\u0d7e': '\u0d33',
        '\u0d7f': '\u0d15',
        # Unicode 9.0
        '\u0d54': '\u0d2e',
        '\u0d55': '\u0d2f',
        '\u0d56': '\u0d34',
    }

    def _canonicalize_chillus(self,text):
        '''Replace every chillu letter with its base consonant + virama.'''
        # Note: This will cause confusion between chillu-based virama and half-u
        # Recommended to use final_virama_to_half_u_explicit() before this
        for chillu, char in MalayalamNormalizer.CHILLU_CHAR_MAP.items():
            text=text.replace(chillu,'{}\u0d4d'.format(char))
        return text

    def _intermediate_virama_to_chillus(self,text):
        '''Convert consonant+virama to chillu form in word-medial positions only.'''
        # Does not convert final virama-consonants, since it's ambiguous (if it's half-u or glottal-stop)
        text=re.sub('\u0d23\u0d4d([\u0d00-\u0d7f])','\u0d7a\\1',text)
        text=re.sub('\u0d28\u0d4d([\u0d00-\u0d7f])','\u0d7b\\1',text)
        text=re.sub('\u0d30\u0d4d([\u0d00-\u0d7f])','\u0d7c\\1',text)
        text=re.sub('\u0d32\u0d4d([\u0d00-\u0d7f])','\u0d7d\\1',text)
        text=re.sub('\u0d33\u0d4d([\u0d00-\u0d7f])','\u0d7e\\1',text)
        text=re.sub('\u0d15\u0d4d([\u0d00-\u0d7f])','\u0d7f\\1',text)
        text=re.sub('\u0d2e\u0d4d([\u0d00-\u0d7f])','\u0d54\\1',text)
        text=re.sub('\u0d2f\u0d4d([\u0d00-\u0d7f])','\u0d55\\1',text)
        text=re.sub('\u0d34\u0d4d([\u0d00-\u0d7f])','\u0d56\\1',text)
        return text

    def _all_virama_to_chillus(self,text):
        '''Convert every consonant+virama (including word-final) to chillu form.'''
        # Warning: Use `_intermediate_virama_to_chillus()` unless you know what you're doing
        text=text.replace('\u0d23\u0d4d','\u0d7a')
        text=text.replace('\u0d28\u0d4d','\u0d7b')
        text=text.replace('\u0d30\u0d4d','\u0d7c')
        text=text.replace('\u0d32\u0d4d','\u0d7d')
        text=text.replace('\u0d33\u0d4d','\u0d7e')
        text=text.replace('\u0d15\u0d4d','\u0d7f')
        text=text.replace('\u0d2e\u0d4d','\u0d54')
        text=text.replace('\u0d2f\u0d4d','\u0d55')
        text=text.replace('\u0d34\u0d4d','\u0d56')
        return text

    def _final_virama_to_half_u_explicit(self,text):
        '''Make word-final half-u explicit: e.g. അവന് -> അവനു്.'''
        # Chandrakala at the end of word is always half-u
        return re.sub('([\u0d15-\u0d3a])\u0d4d([^\u0d00-\u0d7f]|$)', '\\1\u0d41\u0d4d\\2', text)

    def _final_virama_to_u(self,text):
        '''Rewrite half-u (explicit or word-final virama) as a plain u vowel sign.'''
        # By doing this, you'll always implicitly interpret final-u as half-u (as per pre-modern Grammar)
        # അവനു് --> അവനു (Assuming explicit-half-u might also have occured in middle positions)
        text = re.sub('([\u0d15-\u0d3a])\u0d41\u0d4d', '\\1\u0d41', text)
        # അവന് -> അവനു (Only at final positions)
        return re.sub('([\u0d15-\u0d3a])\u0d4d([^\u0d00-\u0d7f]|$)', '\\1\u0d41\\2', text)

    def _correct_geminated_T(self,text):
        '''Rewrite geminated റ്റ as ട്ട.'''
        return text.replace('\u0d31\u0d4d\u0d31','\u0d1f\u0d4d\u0d1f')

    def __init__(self,lang='ml',remove_nuktas=False,decompose_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False,
                do_normalize_vowel_ending=False,do_normalize_numerals=False,convert_numerals_to_native=False,do_colon_to_visarga=False,
                do_explicit_half_u=False,do_canonicalize_chillus=False,do_half_u_to_u=False, do_correct_geminated_T=False,
                do_convert_viramas_to_chillus=False,do_convert_all_viramas_to_chillus=False):
        # Generic flags are forwarded to BaseNormalizer; the rest are
        # Malayalam-only switches consumed in normalize() below.
        super(MalayalamNormalizer,self).__init__(lang,remove_nuktas,decompose_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending,do_normalize_numerals,convert_numerals_to_native,do_colon_to_visarga)
        self.do_explicit_half_u=do_explicit_half_u
        self.do_canonicalize_chillus=do_canonicalize_chillus
        self.do_half_u_to_u=do_half_u_to_u
        self.do_correct_geminated_T=do_correct_geminated_T
        self.do_convert_viramas_to_chillus=do_convert_viramas_to_chillus
        self.do_convert_all_viramas_to_chillus=do_convert_all_viramas_to_chillus

    def normalize(self,text):
        '''Apply Malayalam chillu/orthography fixes plus base normalization.

        Step order is significant: legacy ZWJ chillus are modernized before
        the base-class pass, and chillu canonicalization options are mutually
        exclusive (first matching option wins).
        '''
        # Change from old encoding of chillus (till Unicode 5.0) to new encoding
        # text=text.replace('\u0d28\u0d4d\u0d31','\u0d7b\u0d4d\u0d31') # ന്റ -> ൻ്റ
        text=text.replace('\u0d23\u0d4d\u200d','\u0d7a')
        text=text.replace('\u0d28\u0d4d\u200d','\u0d7b')
        text=text.replace('\u0d30\u0d4d\u200d','\u0d7c')
        text=text.replace('\u0d32\u0d4d\u200d','\u0d7d')
        text=text.replace('\u0d33\u0d4d\u200d','\u0d7e')
        text=text.replace('\u0d15\u0d4d\u200d','\u0d7f')
        # Unicode 9.0 introduces 3 new chillus
        text=text.replace('\u0d2e\u0d4d\u200d','\u0d54')
        text=text.replace('\u0d2f\u0d4d\u200d','\u0d55')
        text=text.replace('\u0d34\u0d4d\u200d','\u0d56')
        # Malayalam digits 4 & 9 look same as chillus
        if not re.search('[\u0d66-\u0d69\u0d6b-\u0d6e]', text):
            # Heuristic: If no other mallu digits are present, safely assume those are chillus
            text=text.replace('\u0d6a','\u0d7c')
            text=text.replace('\u0d6f','\u0d7b')
        # Dot reph to chillu r
        text=text.replace('\u0d4e','\u0d7c')
        # common normalization for Indic scripts
        text=super(MalayalamNormalizer,self).normalize(text)
        # Vertical/Circular Virama (Old Orthography) to Candrakkala
        text=text.replace('\u0d3b','\u0d4d')
        text=text.replace('\u0d3c','\u0d4d')
        if self.do_explicit_half_u:
            text=self._final_virama_to_half_u_explicit(text)
        if self.do_half_u_to_u:
            text=self._final_virama_to_u(text)
        # Normalize chillus (options are mutually exclusive)
        if self.do_canonicalize_chillus:
            text=self._canonicalize_chillus(text)
        elif self.do_convert_viramas_to_chillus:
            text=self._intermediate_virama_to_chillus(text)
        elif self.do_convert_all_viramas_to_chillus:
            text=self._all_virama_to_chillus(text)
        # replace the poorna virama codes specific to script
        # with generic Indic script codes
        text=text.replace('\u0d64','\u0964')
        text=text.replace('\u0d65','\u0965')
        # dependent vowels
        text=text.replace('\u0d46\u0d3e','\u0d4a') # o
        text=text.replace('\u0d47\u0d3e','\u0d4b') # O
        # ai form
        text=text.replace('\u0d46\u0d46','\u0d48')
        # au forms
        text=text.replace('\u0d46\u0d57','\u0d4c')
        text=text.replace('\u0d57','\u0d4c')
        # Old orthographic germination ഺ -> റ്റ
        text=text.replace('\u0d3a','\u0d31\u0d4d\u0d31')
        # correct geminated T
        if self.do_correct_geminated_T:
            text=self._correct_geminated_T(text)
        if self.do_colon_to_visarga: # correct visarga
            text=re.sub(r'([\u0d00-\u0d7f]):','\\1\u0d03',text)
        return text
class SinhalaNormalizer(BaseNormalizer):
MISRA_TO_SUDDA_CONSONANTS_MAP = {
# mahA-prAna -> alpa-prAna
'\u0D9B': '\u0D9A', # kh->k
'\u0D9D': '\u0D9C', # gh->g
'\u0DA1': '\u0DA0', # ch->c
'\u0DA3': '\u0DA2', # jh->j
'\u0DA8': '\u0DA7', # .th->.t
'\u0DAA': '\u0DA9', # .dh->.d
'\u0DAE': '\u0DAD', # th->t
'\u0DB0': '\u0DAF', # dh->d
'\u0DB5': '\u0DB4', # ph->p
'\u0DB7': '\u0DB6', # bh->b
# Sibilants
'\u0DC1': '\u0DC3', # ;s -> s
'\u0DC2': '\u0DC3', # .s -> s
# Nasals
# '\u0D9E': '\u0D9F',
'\u0D9E': '\u0DAB', # ;n -> .n (Very approx)
'\u0DA4': '\u0DAB', # ~n -> .n (Very approx)
| |
to the list values
>>> stack = la.constructs.Stack(dft.FeatureInput)
>>> stack_extended = stack_object.add_materials(stack_dict, ['HA', 'PSu'])
>>> stack_extended
defaultdict(<class 'list'>,
{1: ['outer', 400.0, 'HA'], 2: ['inner', 200.0, 'PSu'],
3: ['middle', 800.0, 'HA'], 4: ['inner', 200.0, 'PSu'],
5: ['outer', 400.0, 'HA']})
'''
'''Move this handling and df conversion/extraction to get_FeatureInput'''
##n_materials = len(materials)
nplies = len(stack)
#print('stack materials ', materials)
# Cycler : alternate while iterating a list and add to a dict
for ind, material in enumerate(it.cycle(materials), 1):
#print('material index:', ind)
#print('materials:', material)
clean_values = []
clean_values.extend(stack[ind]) # take extant stack
clean_values.append(material) # add new value
stack[ind] = clean_values
if ind == nplies:
'''Add to verbose mode.'''
#print('Stack materials have been updated.')
return stack
@classmethod
def stack_to_df(cls, stack):
    '''Return a DataFrame of converted stacks with materials (list of dicts).'''
    frame = pd.DataFrame(stack).T
    frame.reset_index(level=0, inplace=True)            # layer numbers become a column
    frame.columns = ['layer', 'type', 't(um)', 'matl']
    # Reorder columns via the external helper, then restore numeric dtype.
    frame = ut.set_column_sequence(frame, ['layer', 'matl', 'type', 't(um)'])
    frame[['t(um)']] = frame[['t(um)']].astype(float)
    return frame
# =============================================================================
# LAMINATES -------------------------------------------------------------------
# =============================================================================
# Create Laminate objects
class Laminate(Stack):
'''Create a `Laminate` object.
Laminate layers are represented as DataFrame rows. `Laminate` inherits from
the `Stack` class.
Native objects:
- `Snapshot` : stack of unique layers (1, 2, ..., n), single rows and ID columns.
- `LFrame` : snapshot with multiple rows including Dimensional Data.
The listed Parameters are "ID Variables" found in `Snapshot`. The Other
Parameters are known as "Dimensional Variables" found in all native objects.
These are special variables pertaining to columns in DataFrames; they are
suffixed with trailing underscores. These parameters are NOT arguements.
Parameters
----------
layer_ : int
Enumerates layers from bottom, tensile side up.
side_ : str
Side of the stress state; tensile (bottom) or compressive (top).
type_ : str
Type of layer; outer, inner or middle.
matl_ : str
Type of material.
t_ : float
Total thickness per layer.
Other Parameters
----------------
label_ : str
Type of point, e.g. interfacial, internal or discontinuity.
h_ : float
Lamina thickness for each lamina, except middle layers (half thickness).
d_ : float
Distance from the bottom layer; handshakes with calculations in
`theories.<Model>` and used in testing. Units (m).
intf_ : int
Enumerates an interfaces from tensile side up.
k_ : float
Relative height; includes fractional height of kth layer.
Z_ : float
Distance from the neutral axis to an interface (or sub-interface p).
z_ : float
Distance from the neutral axis to the lamina midplane (or sub-midplane_p).
Attributes
----------
p
total
summary
is_special
has_discont
has_neutaxis
frame
{Snapshot, LFrame} : DataFrame
Laminate objects.
Methods
-------
to_csv(**kwargs)
Exports LaminateModel data and FeatureInput dashboard as separate files.
to_xlsx(offset=3, **kwargs)
Exports a single file of both LaminateModel data and FeatureInput dashboard.
Raises
------
IndeterminateError
If custom attributes could not be set to the `LaminateModel`.
Handled to rollback and return an LFrame.
See Also
--------
constructs.Stack : base class; initial FeatureInput parser
constructs.LaminateModel : child class; full LM object
theories.BaseModel : handles user defined Laminate Theory models
theories.handshake : gives LFrame data, gets LMFrame back
models : directory containing package models
Examples
--------
>>> import lamana as la
>>> FeatureInput['Geometry'] = la.input_.Geometry('400-[200]-800')
>>> la.constructs.Laminate(FeatureInput)
<lamana Laminate object (400.0-[200.0]-800.0)>
'''
def __init__(self, FeatureInput):
    '''Build the Laminate objects from a FeatureInput.

    Construction order matters: the Snapshot (one row per layer) is built
    first, then the primitive (p rows per layer), then the LFrame with
    dimensional columns.
    '''
    super(Laminate, self).__init__(FeatureInput)
    self._type_cache = []
    # Laminate Objects
    self.Snapshot = self._build_snapshot()              # df object; stack
    self._primitive = self._build_primitive()           # phase 1
    self.LFrame = self._build_LFrame()                  # phase 2 (see PHASE 2 marker below); df of IDs; formerly Laminate_
    self._frame = self.LFrame                           # general accessor
def __repr__(self):
    '''Return an unambiguous tag: class name, geometry string and p value.'''
    cls_name = type(self).__name__
    return '<lamana {} object ({}), p={}>'.format(cls_name, str(self.Geometry), self.p)
def __eq__(self, other):
    '''Return True when all attributes compare equal; NotImplemented for foreign types.

    pandas DataFrame/Series attributes cannot be compared inside a plain
    dict comparison, so they are compared one-by-one with
    ``ut.ndframe_equal`` and excluded ("blacklisted") from the trimmed
    ``__dict__`` comparison.
    '''
    if isinstance(other, self.__class__):
        # Auto check attrs if assigned to DataFrames/Series, then add to list
        blacklisted = [attr for attr in self.__dict__ if
                       isinstance(getattr(self, attr), (pd.DataFrame, pd.Series))]
        # BUG FIX: the original re-bound ndf_eq on every loop pass, so only
        # the LAST pandas attribute was actually compared — and an instance
        # with no pandas attributes raised NameError. Require ALL pandas
        # attributes to match (vacuously True when there are none).
        ndf_eq = all(
            ut.ndframe_equal(getattr(self, attrname), getattr(other, attrname))
            for attrname in blacklisted
        )
        # Ignore pandas objects; check rest of __dict__ and build trimmed dicts
        # Important to blacklist the trimmed dict from looping in __dict__
        blacklisted.append('_dict_trim')  # prevent self-reference
        self._dict_trim = {
            key: value
            for key, value in self.__dict__.items()
            if key not in blacklisted}
        other._dict_trim = {
            key: value
            for key, value in other.__dict__.items()
            if key not in blacklisted}
        return ndf_eq and self._dict_trim == other._dict_trim  # order is important
    return NotImplemented
def __ne__(self, other):
    '''Invert __eq__ while still propagating NotImplemented.'''
    eq = self.__eq__(other)
    return eq if eq is NotImplemented else not eq
def __hash__(self):
    '''Hash on (Geometry, p) so equal laminates can live in sets/dicts.

    Only requirement is that objects which compare equal share a hash
    (REF 035). ``self.__dict__`` itself is unhashable because of inner
    lists, so only these hashable identity fields participate.
    '''
    identity = (self.Geometry, self.p)
    return hash(identity)
def _build_snapshot(self):
    '''Build a quick, skeletal view of the stack (Snapshot).

    Materials are merged into the stack order, converted to a DataFrame,
    then stamped with expected stress sides. Optimized by concatenation;
    omits looping.
    '''
    annotated_stack = Stack.add_materials(self.stack_order, self.materials)
    snapshot_df = Stack.stack_to_df(annotated_stack)
    # TODO: Dehardcode Laminate (use self.__class__ instead)
    return Laminate._set_stresses(snapshot_df)
# PHASE 1
def _build_primitive(self):
    '''Build a primitive laminate from a stack.

    Build in three steps:
    1. Adopt the Snapshot and add more rows to each layer.
    2. Glue lamina together to make one DataFrame.
    3. Add column of expected stress (`side_`).

    Returns
    -------
    DataFrame
        An extended snapshot; adds p rows per layer.
    '''
    df_snap = self.Snapshot.copy()
    p = self.FeatureInput['Parameters']['p']  # points per layer
    # Replicate Multiple Rows by p
    df = pd.concat([df_snap] * p)
    # `sort` is deprecated; works in pandas 0.16.2; last worked in lamana 0.4.9
    # replaced `sort` with `sort_index` for pandas 0.17.1; backwards compatible
    df.sort_index(axis=0, inplace=True)
    ##df.sort(axis=0, inplace=True)
    df.reset_index(drop=True, inplace=True)
    # TODO: dehardcode Lamainate for self.__class__
    df = Laminate._set_stresses(df)
    #print(df)
    # Build Laminate with Classes
    layers = df.groupby('layer')
    # Cache one type string per layer (used downstream; unique() returns arrays)
    self._type_cache = layers['type'].unique()
    self._type_cache.apply(str)  # converts to str class, not str alone
    #self.LFrame = df # retains copy of partial Laminate (IDs & Dimensionals)
    return df
# PHASE 2
def _build_LFrame(self):
'''Update Laminate DataFrame with new dimensional columns.
This function takes a primitive LFrame (converted Stack) and adds
columns: `label`, `h(m)`, `d(m)`, `intf`, `k`, `Z(m)`, `z(m)`, `z(m)*`
A number of pandas-like implementations are performed to achieve this,
so the coding has a different approach and feel.
'''
# For Implementation
nplies = self.nplies
p = self.p
t_total = self.total
#print('nplies: {}, p: {}, t_total (m): {}'.format(nplies, p, t_total))
##df = self.LFrame.copy()
df = self._primitive
# WRANGLINGS --------------------------------------------------------------
# Indexers ----------------------------------------------------------------
# Many dimensional values are determined by index positions.
# Revised Indexer
df['idx'] = df.index # temp. index column for idxmin & idxmax
interface_tens = df[df['side'] == 'Tens.'].groupby('layer')['idx'].idxmin()
discontinuity_tens = df[(df['side'] == 'Tens.')
& (df['type'] != 'middle')].groupby('layer')['idx'].idxmax()
discontinuity_comp = df[(df['side'] == 'Comp.')
& (df['type'] != 'middle')].groupby('layer')['idx'].idxmin()
interface_comp = df[df['side'] == 'Comp.'].groupby('layer')['idx'].idxmax()
interface_idx = pd.concat([interface_tens, interface_comp])
discont_idx = pd.concat([discontinuity_tens, discontinuity_comp])
#print(discontinuity_tens.values)
if nplies > 1:
pseudomid = [discontinuity_tens.values[-1],
discontinuity_comp.values[0]] # get disconts indices near neutral axis; for even plies
mid_idx = len(df.index) // 2
#print('middle index: ', mid_idx)
# Indexer dict of outside and inside Indices
idxs = {
'interfaces': interface_idx.values.tolist(), # for interfaces
'disconts': discont_idx.values.tolist(), # for disconts.
'middle': mid_idx, # for neut. axis
'intfTens': interface_tens.values.tolist(), # for side_ interfaces
'intfComp': interface_comp.values.tolist(),
'unboundIntfT': interface_tens.values.tolist()[1:],
'unboundIntfC': interface_comp.values.tolist()[:-1],
'disTens': discontinuity_tens.values.tolist(), # for disconts
'disComp': discontinuity_comp.values.tolist(),
}
# Masks -------------------------------------------------------------------
# Interface Mask
s = df['idx'].copy()
s[:] = False # convert series to bool values
s.loc[idxs['interfaces']] = True
mask = s # boolean mask for interfaces
# COLUMNS -----------------------------------------------------------------
# label_ ------------------------------------------------------------------
# Gives name for point types
df['label'] = np.where(mask, 'interface', 'internal') # yes!; applies values if interface, else internal
if p != 1:
df.loc[idxs['disconts'], 'label'] = 'discont.' # yes!; post-fix for disconts.
if (p % 2 != 0) & ('middle' in df['type'].values):
df.loc[idxs['middle'], 'label'] = 'neut. axis'
internal_idx = df[df['label'] == 'internal'].index.tolist() # additional indexer
# '''Add neut. axis in the middle'''
# h_ ----------------------------------------------------------------------
# Gives the thickness (in m) and height w.r.t to the neut. axis (for middle)
df['h(m)'] = df['t(um)'] * 1e-6
df.loc[df['type'] == 'middle', 'h(m)'] = df['t(um)'] | |
<gh_stars>0
from EXOSIMS.Prototypes.OpticalSystem import OpticalSystem
from EXOSIMS.OpticalSystem.Nemati import Nemati
import astropy.units as u
from astropy.io import fits
import astropy.constants as const
import numpy as np
import scipy.stats as st
import scipy.optimize as opt
import os
from scipy import interpolate
from scipy.optimize import fsolve
class Nemati_2019(Nemati):
"""Nemati Optical System class
This class contains all variables and methods necessary to perform
Optical System Module calculations in exoplanet mission simulation using
the model from Nemati 2014.
Args:
\*\*specs:
user specified values
"""
def __init__(self, **specs):
    """Initialize the Nemati 2019 optical-system model.

    Delegates to ``Nemati.__init__`` and, for the first observing mode whose
    ``ContrastScenario`` is ``'DisturbXSens'``, replaces the CSV file paths
    stored in that mode's dict with parsed 2-D numpy tables (disturbance,
    sensitivity MUF, contrast-sensitivity vectors, annular-zone master,
    initial raw contrast and NI-to-contrast data). The table ranges mirror
    specific cell ranges of the Bijan Nemati 2019 spreadsheet (noted inline).

    Args:
        \*\*specs:
            user specified values, forwarded to ``Nemati``.
    """
    Nemati.__init__(self, **specs)
    #If amici-spec, load Disturb x Sens Tables
    #DELETE amici_mode = [self.observingModes[i] for i in np.arange(len(self.observingModes)) if self.observingModes[i]['instName'] == 'amici-spec']
    # Collect ContrastScenario values and their mode indices to locate DisturbXSens modes
    ContrastScenario = [self.observingModes[i]['ContrastScenario'] for i in np.arange(len(self.observingModes)) if 'ContrastScenario' in self.observingModes[i].keys()]
    ContrastScenarioIndex = [i for i in np.arange(len(self.observingModes)) if 'ContrastScenario' in self.observingModes[i].keys()]
    if np.any(np.asarray(ContrastScenario)=='DisturbXSens'): #DELETElen(amici_mode) > 0:
        import csv
        #find index of amici_mode
        #DELETEamici_mode_index = [i for i in np.arange(len(self.observingModes)) if self.observingModes[i]['instName'] == 'amici-spec'][0] #take first amici-spec instName found
        amici_mode_index = [i for i in ContrastScenarioIndex if self.observingModes[i]['ContrastScenario'] == 'DisturbXSens'][0] #take first amici-spec instName found
        #Specifically for the Disturb X Sens observing mode (STILL NOT SURE HOW TO DECIDED IT IS DISTURB X SENS MODE)
        #C:/Users/Dean/Documents/BijanNemati2019/DisturbXSens_DisturbanceTable.csv
        def extractedCSVTable(fname):
            """Parse a numeric CSV file into a 2-D array.

            Empty cells are skipped (so ragged rows are possible — numpy
            then yields an object array rather than a 2-D float array).

            Args:
                fname (string) - full filepath to the the csv file
            Returns:
                tList (numpy array) - 2D array of table values [row,col]
            """
            tList = list()
            with open(fname, newline='') as f:
                csvreader = csv.reader(f,delimiter=',')
                for row in csvreader:
                    trow = list()
                    for i in np.arange(len(row)):
                        if row[i] == '':
                            continue
                        else:
                            trow.append(float(row[i]))
                    tList.append(trow)
            return np.asarray(tList)
        #### LOAD IN Disturbance Table from Disturbance Tab in Bijan2019 spreadsheet
        #I copied Disturbance!H6-U1181 to a text file and converted the range into CSV
        fname = self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTable']
        self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTable'] = extractedCSVTable(fname) #Disturbance table on the Disturbance Sheet in Bijan2019 model
        self.observingModes[amici_mode_index]['DisturbanceCases'] = ['rqt10hr', 'rqt10hr_1mas', 'rqt171012', 'live', 'rqt10hr171212', 'rqt40hr171212', 'rqt10hr171221', 'rqt40hr171221',\
            'cbe10hr171221', 'rqt10hr180109', 'rqt40hr180109', 'cbe10hr180109', 'cbe10hr180130', 'cbe10hr180306'] #Disturbance!H4-U4
        #I have no idea what the above clumn labels mean but self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTableColumnLabels'][0] refers to
        # self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTable'][:,0] #KEEP
        ####
        fname2 = self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveSTD_MUF_Table']#.csv #From DisturbanceLive!B4-AC24
        self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveSTD_MUF_Table'] = extractedCSVTable(fname2)
        fname3 = self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveUNITY_MUF_Table']#.csv #From DisturbanceLive!B29-AC49
        self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveUNITY_MUF_Table'] = extractedCSVTable(fname3)
        #DisturbanceLive!$AL is the column-wise interpretation of DisturbXSens_DisturbanceLiveSTD_MUF_Table
        # Overwrite the 'live' column of the disturbance table with the flattened live STD+UNITY MUF tables
        self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTable'][:,3] = np.concatenate((self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveSTD_MUF_Table'].flatten(),\
            self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceLiveUNITY_MUF_Table'].flatten()))
        #The Disturbance Table starting at CStability!U40 references the DisturbXSens_DisturbanceTable
        #### Load Sensitivity MUF Table
        fname4 = self.observingModes[amici_mode_index]['DisturbXSens_SensitivityMUF'] #.csv #From SensitivityMUF!C3-G23
        self.observingModes[amici_mode_index]['DisturbXSens_SensitivityMUF'] = extractedCSVTable(fname4)
        #Index Labels of Sensitivity MUF Table Columns
        #KEEPself.observingModes[amici_mode_index]['SensitivityCases'] = ['Standard', 'Unity', 'MUF_o1', 'MUF_o2', 'MUF_o3'] #MUFcases, SensitivityMUF!C1-G1
        #Case used for this mode
        #self.observingModes[amici_mode_index]['DisturbanceCase']
        #### Load Contrast Sensitivity Vectors Table from Sensitivities Tab
        fname5 = self.observingModes[amici_mode_index]['DisturbXSens_ContrastSensitivityVectorsTable'] #.csv #From Sensitivities!D5-P109
        self.observingModes[amici_mode_index]['DisturbXSens_ContrastSensitivityVectorsTable'] = extractedCSVTable(fname5) #DisturbXSens_ContrastSensitivityVectorsTable.csv
        #Column labels of the ContrastVectorsTable
        # NOTE(review): bare subscript below has no effect beyond raising KeyError
        # if the key is missing — presumably an implicit presence check; confirm intent.
        self.observingModes[amici_mode_index]['DisturbXSens_ContrastSensitivityVectorsColLabels'] #Sensitivities!D4-P4
        #### Load Annular Zone Master Table
        fname6 = self.observingModes[amici_mode_index]['DisturbXSens_AnnZoneMasterTable'] #.csv #From AnnZoneList!C2-O11
        self.observingModes[amici_mode_index]['DisturbXSens_AnnZoneMasterTable'] = extractedCSVTable(fname6) #DisturbXSens_AnnZoneMasterTable.csv
        #### Load Initial Raw Contrast Table
        fname7 = self.observingModes[amici_mode_index]['DisturbXSens_InitialRawContrastTable'] #.csv #From InitialRawContrast!E2-Q21
        self.observingModes[amici_mode_index]['DisturbXSens_InitialRawContrastTable'] = extractedCSVTable(fname7) #DisturbXSens_InitialRawContrast.csv
        #self.observingModes[amici_mode_index]['DisturbXSens_InitialRawContrastCols']
        #### Load NItoContrast Table
        fname8 = self.observingModes[amici_mode_index]['DisturbXSens_NItoContrastTable'] #.csv #From NItoContrast!B2-N6 #DisturbXSens_NItoContrastTable.csv
        self.observingModes[amici_mode_index]['DisturbXSens_NItoContrastTable'] = extractedCSVTable(fname8) #DisturbXSens_NItoContrastTable.csv
        #self.observingModes[amici_mode_index]['DisturbXSens_DisturbanceTable']
        #print(saltyburrito)
def Cp_Cb_Csp(self, TL, sInds, fZ, fEZ, dMag, WA, mode, TK=None, returnExtra=False):
""" Calculates electron count rates for planet signal, background noise,
and speckle residuals.
Args:
TL (TargetList module):
TargetList class object
sInds (integer ndarray):
Integer indices of the stars of interest
fZ (astropy Quantity array):
Surface brightness of local zodiacal light in units of 1/arcsec2
fEZ (astropy Quantity array):
Surface brightness of exo-zodiacal light in units of 1/arcsec2
dMag (float ndarray):
Differences in magnitude between planets and their host star
WA (astropy Quantity array):
Working angles of the planets of interest in units of arcsec
mode (dict):
Selected observing mode
TK (TimeKeeping object):
Optional TimeKeeping object (default None), used to model detector
degradation effects where applicable.
returnExtra (boolean):
Optional flag, default False, set True to return additional rates for validation
Returns:
C_p (astropy Quantity array):
Planet signal electron count rate in units of 1/s
C_b (astropy Quantity array):
Background noise electron count rate in units of 1/s
C_sp (astropy Quantity array):
1/s
"""
if TK == None:
t_now = 0.
t_EOL = 63. # mission total lifetime in months taken from the Spreadsheet
else:
t_now = (TK.currentTimeNorm.to(u.d)).value/30.4375 # current time in units of months
t_EOL = TK.missionLife.to('d').value/30.4375
f_ref = self.ref_Time # fraction of time spent on ref star for RDI
if float(f_ref) == 0:
# if f_ref isn't set then assume it's 0.2
f_ref = 0.2
dmag_s = self.ref_dMag # reference star dMag for RDI
ppFact = TL.PostProcessing.ppFact(WA) # post processing factor
# This will match the value of 2 in the spreadsheet and not raise the
# assertion error of ppFact being between 0 and 1
k_pp = 1/ppFact
m_s = TL.Vmag # V magnitude
D_PM = self.pupilDiam # primary mirror diameter in units of m
f_o = self.obscurFac # obscuration due to secondary mirror and spiders
f_s = self.shapeFac # aperture shape factor
lam = mode['lam'] # wavelenght in units of nm
inst_name = mode['instName'] # instrument name
BW = mode['BW'] # bandwidth
syst = mode['syst'] # starlight suppression system
inst = mode['inst'] # instrument dictionary
lam_D = lam.to(u.m)/(D_PM*u.mas.to(u.rad)) # Diffraction limit
F_0 = TL.starF0(sInds,mode)*BW*lam
# Setting in the json file that differentiates between MCBE, ICBE, REQ
try:
CS_setting = syst['core_stability_setting']
except:
CS_setting = 'MCBE'
#Contrast Scenario related to DisturbXSens
if mode['ContrastScenario'] == 'DisturbXSens':
if 'C_CG' in mode.keys() and 'dC_CG' in mode.keys():
C_CG = mode['C_CG'] #SNR!T45
dC_CG = mode['dC_CG']
elif 'sumM' in mode.keys() and 'sumV' in mode.keys() and 'NI_to_Contrast' in mode.keys() and \
'sumdM' in mode.keys() and 'sumdV' in mode.keys():
NI_to_Contrast = mode['NI_to_Contrast']
sumM = mode['sumM']
sumV = mode['sumV']
C_CG = (sumM + sumV)/NI_to_Contrast
sumdM = mode['sumdM']
sumdV = mode['sumdV']
dC_CG = np.sqrt(sumdM**2. + sumdV**2.)/NI_to_Contrast
else: #Load all the csv files
#TODO REPLACE THIS BIT WITH SOMETHING MORE REALSITIC based on CStability!M34
modeNames = ['Z2','Z3','Z4','Z5','Z6','Z7','Z8','Z9','Z10','Z11','Gain Err Z5','Gain Err Z6','Gain Err Z7','Gain Err Z8',\
'Gain Err Z9','Gain Err Z10','Gain Err Z11','Pupil X','Pupil Y','DM Settle','DM Therm']
#Corresponds to CStability!K9-29, T6-T29, T40-60
disturbanceMults = np.asarray([2.87,2.87,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,\
1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.00e-03,1.,1.,1.,1.])
disturbanceMultUnits = ['mas rms','mas rms','pm rms','pm rms','pm rms','pm rms','pm rms','pm rms','pm rms','pm rms','pm rms',\
'pm rms','pm rms','pm rms','pm rms','pm rms','pm rms','um rms','um rms','hrs after 2 wks * %/dec','mK']
#Corresponding to CStability!AW40-60
disturbanceCategories = ["ThermalHP", "ThermalLP", "Pupil Shear", "RWA TT", "RWA WFE", "DM Settle", "DM Therm"] #Corresponds to CStability G38-G45
#Disturbance Column for specific scenario
#EXAMPLE mode['DisturbXSens_DisturbanceTable'][:,mode['DisturbanceCaseInd']]
#DisturbanceCase is SNR!T73 for each table column in SensitivityMUF
DisturbanceCaseInd = mode['DisturbanceCases'].index(mode['DisturbanceCase'])
CS_NmodelCoef = mode['modeCoeffs'] # CStability!E23 CS_NmodelCoef
CS_Nstat = mode['statistics'] # CStability!E24 CS_Nstat
CS_Nmech = mode['mechanisms'] # CStability!E25 CS_Nmech
MUFindex = mode['MUFcases'].index(mode['MUFCase']) # CStability!E26 MUFindex, MUFcases Sensitivity!C1-G1
#i is the Mode Number in ModeNames, j is the Disturbance Table Category
modeNameNumbers = np.arange(len(modeNames)) #CStability!U40 Table rows. TODO double check if there are other modes possible...
#### Annular Zone List
#mode['DisturbXSens_AnnZoneMasterTable']
#AnnZoneMasterColLabels AnnZoneList!C1-O1
#SenseCaseSel SNR!T31
AnnZoneTableCol = np.where(np.asarray(mode['AnnZoneMasterColLabels']) == mode['SenseCaseSel'])[0][0]
planetWAListMin = mode['DisturbXSens_AnnZoneMasterTable'][0:5,AnnZoneTableCol]
planetWAListMax = mode['DisturbXSens_AnnZoneMasterTable'][5:,AnnZoneTableCol]
#TODO figure out why the 1e-XX below is used in the spreadsheet
planetPositionalWA = 0.0000000001+WA/(mode['lam']/self.pupilDiam*u.rad).to('mas').decompose() #The correction from SNR!T51 #in units of lam/D
DarkHoleIWA = mode['IWA']/(mode['lam']/self.pupilDiam*u.rad).to('mas').decompose() #in units of lam/D
DarkHoleOWA = mode['OWA']/(mode['lam']/self.pupilDiam*u.rad).to('mas').decompose() #in units of lam/D
planetObservingWA = np.asarray([planetPositionalWA[i].value if planetPositionalWA[i] < DarkHoleOWA else (DarkHoleIWA+0.8*(DarkHoleOWA-DarkHoleIWA)).value for i in np.arange(len(planetPositionalWA))]) #Based on cell SNR!T52
planetAnnZones = np.asarray([np.where((planetWAListMin <= planetObservingWA[i])*(planetWAListMax > planetObservingWA[i]))[0][0] for i in np.arange(len(planetObservingWA))])
#planetAnnZones is an array of AnnZones the size of WA
#NEED TO CHECK IF PLANET ANNZONES ARE IN PROPER RANGE
#M, V, dM, and dV all belong to CStability Disturbance Table, this section converts from the DisturbXSens_DisturbanceTable to the CStability Disturbance Table CStability!U34
self.M_ij_disturbance = np.zeros((len(modeNames),len(disturbanceCategories)*4))
self.V_ij_disturbance = np.zeros((len(modeNames),len(disturbanceCategories)*4))
self.dM_ij_disturbance = np.zeros((len(modeNames),len(disturbanceCategories)*4))
self.dV_ij_disturbance = np.zeros((len(modeNames),len(disturbanceCategories)*4))
for i in np.arange(len(modeNames)): #Iterate down rows of mode names
for j in np.arange(len(disturbanceCategories)):
disturbanceSheetRow = modeNameNumbers[i] + CS_NmodelCoef*( 4*j + CS_Nstat*CS_Nmech*MUFindex)
self.M_ij_disturbance[i,4*j] = mode['DisturbXSens_DisturbanceTable'][disturbanceSheetRow,DisturbanceCaseInd]
disturbanceSheetRow = modeNameNumbers[i] + CS_NmodelCoef*( 4*j+1 + CS_Nstat*CS_Nmech*MUFindex)
self.V_ij_disturbance[i,4*j+1] = mode['DisturbXSens_DisturbanceTable'][disturbanceSheetRow,DisturbanceCaseInd]
disturbanceSheetRow = modeNameNumbers[i] + CS_NmodelCoef*( 4*j+2 + CS_Nstat*CS_Nmech*MUFindex)
self.dM_ij_disturbance[i,4*j+2] = mode['DisturbXSens_DisturbanceTable'][disturbanceSheetRow,DisturbanceCaseInd]
disturbanceSheetRow = modeNameNumbers[i] + CS_NmodelCoef*( 4*j+3 + CS_Nstat*CS_Nmech*MUFindex)
self.dV_ij_disturbance[i,4*j+3] = mode['DisturbXSens_DisturbanceTable'][disturbanceSheetRow,DisturbanceCaseInd]
#### Sensitivity Table from CStability!J8
#Calculate column L of | |
import json
import uuid
from os import path
from atom.ext.django_filters.views import UserKwargFilterSetMixin
from atom.views import (
CreateMessageMixin,
DeleteMessageMixin,
UpdateMessageMixin,
ActionView,
ActionMessageMixin,
)
from braces.views import (
MessageMixin,
FormValidMessageMixin,
SelectRelatedMixin,
PrefetchRelatedMixin,
UserFormKwargsMixin,
)
from guardian.shortcuts import get_anonymous_user
from cached_property import cached_property
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.syndication.views import Feed
from django.core.exceptions import PermissionDenied
from django.core.files.base import ContentFile
from django.db.models import Q
from django.http import HttpResponseBadRequest, JsonResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.utils.datetime_safe import datetime
from django.utils.encoding import force_text
from django.utils.feedgenerator import Atom1Feed
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import CreateView, DeleteView, DetailView, FormView
from django_filters.views import FilterView
from extra_views import UpdateWithInlinesView, CreateWithInlinesView
from feder.alerts.models import Alert
from feder.cases.models import Case
from feder.main.mixins import DisableOrderingListViewMixin, PerformantPagintorMixin
from feder.letters.formsets import AttachmentInline
from feder.letters.settings import LETTER_RECEIVE_SECRET
from feder.main.mixins import (
AttrPermissionRequiredMixin,
RaisePermissionRequiredMixin,
BaseXSendFileView,
)
from feder.monitorings.models import Monitoring
from feder.records.models import Record
from .filters import LetterFilter
from .forms import LetterForm, ReplyForm, AssignLetterForm
from .mixins import LetterObjectFeedMixin
from .models import Letter, Attachment
from feder.monitorings.tasks import send_mass_draft
from feder.virus_scan.models import Request as ScanRequest
_("Letters index")
class MixinGzipXSendFile:
    """Teach an X-Sendfile view about gzip-compressed files.

    When the stored file ends in ``.gz``, serve it with a ``gzip``
    content encoding and advertise the name without the ``.gz`` suffix.
    """

    def get_sendfile_kwargs(self, context):
        kwargs = super().get_sendfile_kwargs(context)
        name = kwargs["filename"]
        if name and name.endswith(".gz"):
            kwargs["encoding"] = "gzip"
            kwargs["attachment_filename"] = path.basename(name[: -len(".gz")])
        return kwargs
class CaseRequiredMixin:
    """Limit the view's queryset to letters that belong to some case."""

    def get_queryset(self):
        with_case = super().get_queryset().exclude(record__case=None)
        return with_case.attachment_count()
class LetterCommonMixin:
    """Shared queryset and permission-object logic for letter views.

    Must be listed before any permission-related mixins in the MRO so
    those mixins resolve permissions against the filtered queryset.
    """

    def get_queryset(self):
        # Keep letters that either belong to a case or are mass drafts;
        # everything else (case-less, non-draft) is excluded.
        caseless_non_draft = Q(record__case__isnull=True) & ~Q(
            message_type=Letter.MESSAGE_TYPES.mass_draft
        )
        qs = super().get_queryset().exclude(caseless_non_draft)
        return qs.attachment_count().with_attachment()

    def get_permission_object(self):
        letter = super().get_object()
        if letter.is_mass_draft():
            return letter.mass_draft.monitoring
        return letter.record.case.monitoring
class LetterListView(
    LetterCommonMixin,
    UserKwargFilterSetMixin,
    DisableOrderingListViewMixin,
    PrefetchRelatedMixin,
    SelectRelatedMixin,
    PerformantPagintorMixin,
    FilterView,
):
    """Paginated, filterable list of letters visible to the current user."""

    filterset_class = LetterFilter
    model = Letter
    select_related = ["record__case"]
    prefetch_related = [
        "author_user",
        "author_institution",
        "record__case__institution",
    ]
    paginate_by = 25

    def get_queryset(self):
        visible = super().get_queryset().attachment_count()
        return visible.for_user(self.request.user)
class LetterDetailView(SelectRelatedMixin, LetterCommonMixin, DetailView):
    """Single-letter page, optionally enriched with similar letters."""

    model = Letter
    select_related = ["author_institution", "author_user", "record__case__monitoring"]

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Similar-letter lookup is gated by an Elasticsearch feature flag.
        if settings.ELASTICSEARCH_SHOW_SIMILAR:
            context["similar_list"] = context["object"].get_more_like_this()
        context["show_similar"] = settings.ELASTICSEARCH_SHOW_SIMILAR
        return context

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)
class LetterMessageXSendFileView(MixinGzipXSendFile, BaseXSendFileView):
    """Serve the raw ``.eml`` source of a letter as a download."""

    model = Letter
    file_field = "eml"
    send_as_attachment = True

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)
class LetterCreateView(
    RaisePermissionRequiredMixin,
    UserFormKwargsMixin,
    CreateMessageMixin,
    FormValidMessageMixin,
    CreateView,
):
    """Create a new letter within a case (URL kwarg ``case_pk``)."""

    model = Letter
    form_class = LetterForm
    permission_required = "monitorings.add_letter"

    @cached_property
    def case(self):
        # The case from the URL, restricted to what the user may see.
        accessible = Case.objects.select_related("monitoring").for_user(
            self.request.user
        )
        return get_object_or_404(accessible, pk=self.kwargs["case_pk"])

    def get_permission_object(self):
        return self.case.monitoring

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs["case"] = self.case
        return form_kwargs
class LetterReplyView(
    LetterCommonMixin,
    RaisePermissionRequiredMixin,
    UserFormKwargsMixin,
    FormValidMessageMixin,
    CreateWithInlinesView,
):
    """Create a reply (with attachment inlines) to an existing letter.

    If the POST contains a ``send`` key the reply is dispatched
    immediately; otherwise it is saved as a draft for review.
    """

    template_name = "letters/letter_reply.html"
    model = Letter
    form_class = ReplyForm
    inlines = [AttachmentInline]
    permission_required = "monitorings.add_draft"

    @cached_property
    def letter(self):
        # The letter being replied to; 404 when missing or inaccessible.
        return get_object_or_404(
            self.get_queryset()
            .select_related("record__case__monitoring")
            .for_user(self.request.user),
            pk=self.kwargs["pk"],
        )

    def get_permission_object(self):
        return self.letter.case.monitoring

    def get_form_kwargs(self):
        kw = super().get_form_kwargs()
        kw["letter"] = self.letter
        return kw

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["object"] = self.letter
        return context

    def forms_valid(self, form, inlines):
        result = super().forms_valid(form, inlines)
        if "send" in self.request.POST:
            self.object.send()
        return result

    def get_form_valid_message(self):
        if self.object.eml:
            # BUGFIX: message read "saved and send!" (grammar error in a
            # user-facing msgid; existing translations must be updated).
            return _("Reply {reply} to {letter} saved and sent!").format(
                letter=self.letter, reply=self.object
            )
        return _("Reply {reply} to {letter} saved to review!").format(
            letter=self.letter, reply=self.object
        )
class LetterSendView(
    LetterCommonMixin, AttrPermissionRequiredMixin, MessageMixin, ActionView
):
    """Send a drafted letter.

    A regular letter is sent to its case's institution directly; a mass
    draft is handed to the ``send_mass_draft`` task for every matching case.
    """

    model = Letter
    permission_required = "monitorings.reply"
    template_name_suffix = "_send"

    def action(self):
        if not self.object.is_mass_draft():
            self.object.send()
            self.messages.success(
                _('Reply "{letter}" has been sent to {institution}!').format(
                    letter=self.object, institution=self.object.case.institution
                ),
                fail_silently=True,
            )
            return
        # Count recipients before scheduling so the message is accurate.
        recipient_count = self.object.mass_draft.determine_cases().count()
        send_mass_draft(self.object.pk)
        self.messages.success(
            _(
                'Message "{letter}" has been scheduled for sending '
                "to {count} recipients!"
            ).format(letter=self.object, count=recipient_count),
            fail_silently=True,
        )

    def get_success_url(self):
        target = (
            self.object.mass_draft.monitoring
            if self.object.is_mass_draft()
            else self.object
        )
        return target.get_absolute_url()

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)
class LetterUpdateView(
    LetterCommonMixin,
    AttrPermissionRequiredMixin,
    UserFormKwargsMixin,
    UpdateMessageMixin,
    FormValidMessageMixin,
    UpdateWithInlinesView,
):
    """Edit a letter together with its attachment inlines."""

    model = Letter
    form_class = LetterForm
    inlines = [AttachmentInline]
    permission_required = "monitorings.change_letter"

    def get_queryset(self):
        qs = super().get_queryset().for_user(self.request.user)
        return qs.with_attachment()
class LetterDeleteView(
    LetterCommonMixin, AttrPermissionRequiredMixin, DeleteMessageMixin, DeleteView
):
    """Delete a letter, returning to its case (or, for a mass draft, its
    monitoring) afterwards."""

    model = Letter
    permission_required = "monitorings.delete_letter"

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)

    def get_success_url(self):
        if self.object.is_mass_draft():
            return self.object.mass_draft.monitoring.get_absolute_url()
        return self.object.case.get_absolute_url()
class LetterRssFeed(Feed):
    """Site-wide RSS feed of the 30 most recent letters visible to
    anonymous users."""

    title = _("Latest letters on whole site")
    link = reverse_lazy("letters:list")
    # BUGFIX: the msgid contained the typo "receving"; existing
    # translations keyed on the old string must be updated.
    description = _(
        "Updates on new letters on site including "
        + "receiving and sending in all monitorings."
    )
    feed_url = reverse_lazy("letters:rss")
    description_template = "letters/_letter_feed_item.html"

    def items(self):
        # Only case-bound letters that the anonymous user may see.
        return (
            Letter.objects.with_feed_items()
            .exclude(record__case=None)
            .recent()
            .for_user(get_anonymous_user())
            .order_by("-created")[:30]
        )

    def item_title(self, item):
        return item.title

    def item_author_name(self, item):
        return force_text(item.author)

    def item_author_link(self, item):
        # Returns None (no link) for letters without an author.
        if item.author:
            return item.author.get_absolute_url()

    def item_pubdate(self, item):
        return item.created

    def item_updateddate(self, item):
        return item.modified

    def item_categories(self, item):
        return [
            item.case,
            item.case.monitoring,
            item.case.institution,
            item.case.institution.jst,
        ]

    def item_enclosure_url(self, item):
        return item.eml.url if item.eml else None
class LetterAtomFeed(LetterRssFeed):
    """Atom flavour of the site-wide letter feed."""

    feed_type = Atom1Feed
    feed_url = reverse_lazy("letters:atom")
    subtitle = LetterRssFeed.description
class LetterMonitoringRssFeed(LetterObjectFeedMixin, LetterRssFeed):
    """RSS feed of letters scoped to a single monitoring."""

    model = Monitoring
    filter_field = "record__case__monitoring"
    kwargs_name = "monitoring_pk"

    def title(self, obj):
        label = force_text(obj)
        return _("Letter for monitoring %s") % label

    def description(self, obj):
        label = force_text(obj)
        return _(
            "Archive of letter for cases which involved in monitoring %s"
        ) % label
class LetterMonitoringAtomFeed(LetterMonitoringRssFeed):
    """Atom flavour of the per-monitoring letter feed."""

    feed_type = Atom1Feed
    feed_url = reverse_lazy("letters:atom")
    subtitle = LetterMonitoringRssFeed.description
class LetterCaseRssFeed(LetterObjectFeedMixin, LetterRssFeed):
    """RSS feed of letters scoped to a single case."""

    model = Case
    filter_field = "record__case"
    kwargs_name = "case_pk"

    def title(self, obj):
        label = force_text(obj)
        return _("Letter for case %s") % label

    def description(self, obj):
        label = force_text(obj)
        return _("Archive of letter for case %s") % label
class LetterCaseAtomFeed(LetterCaseRssFeed):
    """Atom flavour of the per-case letter feed."""

    feed_type = Atom1Feed
    feed_url = reverse_lazy("letters:atom")
    subtitle = LetterCaseRssFeed.description
class LetterReportSpamView(ActionMessageMixin, CaseRequiredMixin, ActionView):
    """Let any visitor report a letter as spam.

    Creates an :class:`Alert` for the monitoring's moderators; only
    letters not yet classified (``SPAM.unknown``) can be reported.
    """

    template_name_suffix = "_spam"
    model = Letter

    def get_queryset(self):
        unclassified = super().get_queryset().filter(is_spam=Letter.SPAM.unknown)
        return unclassified.for_user(self.request.user)

    def action(self):
        # Anonymous reports are stored without an author.
        reporter = None if self.request.user.is_anonymous else self.request.user
        Alert.objects.create(
            monitoring=self.object.case.monitoring,
            reason=_("SPAM"),
            author=reporter,
            link_object=self.object,
        )

    def get_success_message(self):
        return _(
            "Thanks for your help. The report was forwarded to responsible persons."
        )

    def get_success_url(self):
        return self.object.case.get_absolute_url()
class LetterResendView(
    ActionMessageMixin, AttrPermissionRequiredMixin, CaseRequiredMixin, ActionView
):
    """Resend an outgoing letter by creating and sending a fresh copy on
    a new record, leaving the original untouched."""

    template_name_suffix = "_resend"
    model = Letter
    permission_required = "monitorings.reply"

    def get_queryset(self):
        return (
            super()
            .get_queryset()
            .select_related("record__case__monitoring")
            .is_outgoing()
            .for_user(self.request.user)
        )

    def get_permission_object(self):
        return self.get_object().case.monitoring

    def action(self):
        case = self.object.case
        # Build a brand-new letter so the original's history is preserved.
        self.resend = Letter(
            author_user=self.request.user,
            record=Record.objects.create(case=case),
            title=self.object.title,
            body=self.object.body,
        )
        self.resend.send(commit=True, only_email=False)

    def get_success_message(self):
        # BUGFIX: message read "The message was resend." (grammar error in
        # a user-facing msgid; existing translations must be updated).
        return _("The message was resent.")

    def get_success_url(self):
        return self.object.case.get_absolute_url()
class LetterMarkSpamView(
    RaisePermissionRequiredMixin, CaseRequiredMixin, ActionMessageMixin, ActionView
):
    """Moderator action: classify a reported letter as spam or valid.

    POSTing with a ``valid`` key marks the letter as non-spam; otherwise
    it is marked as spam. Any open alerts linked to the letter are
    resolved either way.
    """

    template_name_suffix = "_mark_spam"
    model = Letter
    permission_required = "monitorings.spam_mark"
    accept_global_perms = True

    def get_object(self, *args, **kwargs):
        # Cache the object so repeated hook calls hit the DB only once.
        if not hasattr(self, "object"):
            self.object = super().get_object(*args, **kwargs)
        return self.object

    def get_permission_object(self):
        return self.get_object().case.monitoring

    def get_queryset(self):
        return (
            super()
            .get_queryset()
            .filter(is_spam=Letter.SPAM.unknown)
            .for_user(self.request.user)
        )

    def action(self):
        if "valid" in self.request.POST:
            self.object.is_spam = Letter.SPAM.non_spam
        else:
            self.object.is_spam = Letter.SPAM.spam
        self.object.mark_spam_by = self.request.user
        self.object.mark_spam_at = datetime.now()
        # BUGFIX: "mark_spam_at" was assigned above but omitted from
        # update_fields, so the timestamp was silently never persisted.
        self.object.save(update_fields=["is_spam", "mark_spam_by", "mark_spam_at"])
        # Resolve any open alerts pointing at this letter.
        Alert.objects.link_object(self.object).update(
            solver=self.request.user, status=True
        )

    def get_success_message(self):
        if "valid" in self.request.POST:
            return _("The letter {object} has been marked as valid.").format(
                object=self.object
            )
        return _("The message {object} has been marked as spam and hidden.").format(
            object=self.object
        )

    def get_success_url(self):
        return self.object.case.get_absolute_url()
class UnrecognizedLetterListView(
    UserKwargFilterSetMixin,
    RaisePermissionRequiredMixin,
    SelectRelatedMixin,
    PrefetchRelatedMixin,
    FilterView,
):
    """List letters that could not be matched to any case, each carrying
    an inline assignment form.

    BUGFIX: ``select_related = ["record"]`` was declared but
    ``SelectRelatedMixin`` was missing from the bases, so the attribute
    had no effect; the mixin is now applied.
    """

    filterset_class = LetterFilter
    model = Letter
    paginate_by = 10
    permission_object = None
    permission_required = "letters.recognize_letter"
    template_name_suffix = "_unrecognized_list"
    select_related = ["record"]
    prefetch_related = ["attachment_set"]
    ordering = "-pk"

    def get_queryset(self):
        return (
            super()
            .get_queryset()
            .filter(record__case=None)
            .exclude(message_type=Letter.MESSAGE_TYPES.mass_draft)
        )

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["object_list"] = self.update_object_list(context["object_list"])
        return context

    def update_object_list(self, object_list):
        # Attach a pre-bound assignment form to every unrecognized letter
        # so the template can render one form per row.
        result = []
        for obj in object_list:
            obj.assign_form = AssignLetterForm(letter=obj)
            result.append(obj)
        return result
class AssignLetterFormView(
    PrefetchRelatedMixin, RaisePermissionRequiredMixin, SuccessMessageMixin, FormView
):
    """Assign an unrecognized letter (URL kwarg ``pk``) to a case."""

    model = Letter
    form_class = AssignLetterForm
    permission_object = None
    success_url = reverse_lazy("letters:unrecognized_list")
    permission_required = "letters.recognize_letter"
    template_name = "letters/letter_assign.html"
    success_message = _("Assigned letter to case '%(case)s'")

    @cached_property
    def letter(self):
        # NOTE(review): no for_user() filtering here — access is gated
        # solely by the recognize_letter permission; confirm intentional.
        obj = get_object_or_404(self.model, pk=self.kwargs["pk"])
        obj.assign_form = self.form_class(letter=obj)
        return obj

    def get_context_data(self, **kwargs):
        kwargs["object"] = self.letter
        return super().get_context_data(**kwargs)

    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs["letter"] = self.letter
        return form_kwargs

    def form_valid(self, form):
        form.save()
        return super().form_valid(form)
class AttachmentXSendFileView(MixinGzipXSendFile, BaseXSendFileView):
    """Serve an attachment file, refusing files flagged by the virus scan.

    BUGFIX: a local ``get_sendfile_kwargs`` override merely repeated what
    the inherited ``MixinGzipXSendFile`` already does (setting the gzip
    encoding for ``.gz`` files) and, unlike the mixin, called
    ``.endswith`` without guarding against a missing filename. The
    redundant override has been removed; behavior is unchanged.
    """

    model = Attachment
    file_field = "attachment"
    send_as_attachment = True

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)

    def render_to_response(self, context):
        # Block downloads of files the scanner marked as infected.
        if context["object"].is_infected():
            raise PermissionDenied(
                "You do not have permission to view that file. "
                "The file was considered dangerous."
            )
        return super().render_to_response(context)
class AttachmentRequestCreateView(ActionMessageMixin, ActionView):
    """Queue an attachment for virus scanning."""

    template_name_suffix = "_request_scan"
    model = Attachment

    def get_object(self, *args, **kwargs):
        # Cache so the action/message/url hooks share one DB fetch.
        if not hasattr(self, "object"):
            self.object = super().get_object(*args, **kwargs)
        return self.object

    def get_queryset(self):
        return super().get_queryset().for_user(self.request.user)

    def action(self):
        ScanRequest.objects.create(
            content_object=self.object,
            field_name="attachment",
        )

    def get_success_message(self):
        return _("The file {} has been queued for scanning").format(self.object)

    def get_success_url(self):
        return self.object.letter.get_absolute_url()
class ReceiveEmail(View):
required_content_type = "multipart/form-data"
required_version = "v2"
def post(self, request):
if request.GET.get("secret") != LETTER_RECEIVE_SECRET:
raise PermissionDenied
if request.content_type != self.required_content_type:
return HttpResponseBadRequest(
"The request has an invalid Content-Type. "
'The acceptable Content-Type is "{}".'.format(
self.required_content_type
)
)
manifest = json.load(request.FILES["manifest"])
if manifest.get("version") != self.required_version:
return HttpResponseBadRequest(
"The request has an invalid format version. "
'The acceptable format version is "{}".'.format(self.required_version)
)
eml_data = request.FILES["eml"]
letter = self.get_letter(
headers=manifest["headers"],
eml_manifest=manifest["eml"],
text=manifest["text"],
eml_data=eml_data,
)
Attachment.objects.bulk_create(
self.get_attachment(attachment, letter)
for attachment | |
# appserver/neo4japp/blueprints/projects.py
from typing import List, Optional, Tuple, Dict, Iterable
from flask import jsonify, Blueprint, g
from flask.views import MethodView
from marshmallow import ValidationError
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import raiseload, joinedload
from webargs.flaskparser import use_args
from neo4japp.blueprints.auth import auth
from neo4japp.database import db, get_projects_service, get_authorization_service
from neo4japp.exceptions import AccessRequestRequiredError, RecordNotFound
from neo4japp.models import (
AppRole,
AppUser,
Projects,
projects_collaborator_role
)
from neo4japp.models.projects_queries import add_project_user_role_columns, ProjectCalculator
from neo4japp.schemas.common import PaginatedRequestSchema
from neo4japp.schemas.filesystem import (
ProjectListSchema,
ProjectListRequestSchema,
ProjectSearchRequestSchema,
ProjectCreateSchema,
ProjectResponseSchema,
BulkProjectRequestSchema,
BulkProjectUpdateRequestSchema,
MultipleProjectResponseSchema,
ProjectUpdateRequestSchema
)
from neo4japp.schemas.projects import (
ProjectCollaboratorListSchema,
ProjectMultiCollaboratorUpdateRequest
)
from neo4japp.utils.request import Pagination
class ProjectBaseView(MethodView):
    """Base view class for dealing with projects.

    Provides queryset construction (with per-user permission columns),
    permission checking, and shared response/update helpers for the
    concrete project endpoints.
    """

    def get_nondeleted_project_query(self, user: AppUser, accessible_only=False):
        """
        Return a query for fetching non-deleted projects accessible by the passed
        in user. You can add additional filters if needed.
        :param user: the user to check
        :param accessible_only: true to not include non-accessible projects
        :return: the query
        """
        t_role = db.aliased(AppRole)
        t_user = db.aliased(AppUser)

        # Users with this role bypass per-project access checks.
        private_data_access = get_authorization_service().has_role(
            user, 'private-data-access'
        )

        # The following code gets a collection of projects, complete with permission
        # information for the current user, all in one go. Unfortunately, it's complex,
        # but it should be manageable if the only instance of this kind of code is in
        # one place (right here). The upside is that all downstream code, including the
        # client, is very simple because all the needed information has already been
        # loaded.
        query = db.session.query(Projects) \
            .options(joinedload(Projects.root),
                     raiseload('*')) \
            .filter(Projects.deletion_date.is_(None)) \
            .distinct()

        if accessible_only and not private_data_access:
            expected_roles = ['project-read', 'project-write', 'project-admin']
            project_role_sq = db.session.query(projects_collaborator_role, t_role.name) \
                .join(t_role, t_role.id == projects_collaborator_role.c.app_role_id) \
                .join(t_user, t_user.id == projects_collaborator_role.c.appuser_id) \
                .subquery()
            # This code does an inner join of the necessary role columns, so if the
            # user doesn't have the roles, they don't have permission
            query = query.join(
                project_role_sq,
                and_(project_role_sq.c.projects_id == Projects.id,
                     project_role_sq.c.appuser_id == user.id,
                     project_role_sq.c.name.in_(expected_roles)))

        # Add extra boolean columns to the result indicating various permissions
        # (read, write, etc.) for the current user, which then can be read later by
        # ProjectCalculator or manually
        query = add_project_user_role_columns(query, Projects, user.id,
                                              access_override=private_data_access)

        return query

    def get_nondeleted_project(self, filter):
        """
        Returns a project that is guaranteed to be non-deleted that
        matches the provided filter.
        :param filter: the SQL Alchemy filter
        :return: a non-null project
        :raises RecordNotFound: when no project matches
        """
        projects, *_ = self.get_nondeleted_projects(filter)
        if not projects:
            # BUGFIX: error title said 'File Not Found' for a project lookup.
            raise RecordNotFound(
                title='Project Not Found',
                message='The requested project could not be found.',
                code=404)
        return projects[0]

    def get_nondeleted_projects(self, filter, accessible_only=False, sort=None,
                                require_hash_ids: List[str] = None,
                                pagination: Optional[Pagination] = None) \
            -> Tuple[List[Projects], int]:
        """
        Returns projects that are guaranteed to be non-deleted that match the
        provided filter.
        :param filter: the SQL Alchemy filter
        :param accessible_only: true to only get projects accessible by the current user
        :param sort: optional list of sort columns
        :param require_hash_ids: optional hash IDs that must all appear in the result
        :param pagination: optional pagination
        :return: tuple of (projects, total count); the list may be empty
        :raises RecordNotFound: when require_hash_ids has IDs not in the result
        """
        current_user = g.current_user

        query = self.get_nondeleted_project_query(
            current_user, accessible_only=accessible_only) \
            .order_by(*sort or [])

        if filter is not None:
            query = query.filter(filter)

        if pagination:
            paginated_results = query.paginate(pagination.page, pagination.limit)
            results = paginated_results.items
            total = paginated_results.total
        else:
            results = query.all()
            total = len(results)

        projects = []

        # We added permission columns to the result of the query, but we need to put
        # them into the instances of Projects (since we only return a list of Projects
        # at the end of this method)
        for row in results:
            calculator = ProjectCalculator(row, Projects)
            calculator.calculate_privileges([current_user.id])
            projects.append(calculator.project)

        # Handle helper require_hash_ids argument that checks to see if all projects
        # wanted actually appeared in the results
        if require_hash_ids:
            missing_hash_ids = self.get_missing_hash_ids(require_hash_ids, projects)
            if len(missing_hash_ids):
                raise RecordNotFound(
                    title='Project Not Found',
                    message=f"The request specified one or more projects "
                            f"({', '.join(missing_hash_ids)}) that could not be found.",
                    code=404)

        return projects, total

    def check_project_permissions(self, projects: List[Projects], user: AppUser,
                                  require_permissions: List[str]):
        """
        Helper method to check permissions on the provided projects. On error, an
        exception is thrown.
        :param projects: the projects to check
        :param user: the user to check permissions for
        :param require_permissions: a list of permissions to require (like 'writable')
        :raises AccessRequestRequiredError: when a permission is missing
        """
        # Check each project against each required permission
        for project in projects:
            for permission in require_permissions:
                if not getattr(project.calculated_privileges[user.id], permission):
                    # Do not reveal the project name with the error!
                    raise AccessRequestRequiredError(
                        curr_access='no',
                        req_access=permission,
                        hash_id=project.hash_id)

    def get_project_response(self, hash_id: str, user: AppUser):
        """
        Fetch a project and return a response that can be sent to the client.
        Permissions are checked and this method will throw a relevant response
        exception.
        :param hash_id: the hash ID of the project
        :param user: the user to check permissions for
        :return: the JSON response
        """
        return_project = self.get_nondeleted_project(Projects.hash_id == hash_id)
        self.check_project_permissions([return_project], user, ['readable'])

        return jsonify(ProjectResponseSchema(context={
            'user_privilege_filter': g.current_user.id,
        }).dump({
            'result': return_project,
        }))

    def get_bulk_project_response(self, hash_ids: List[str], user: AppUser, *,
                                  missing_hash_ids: Iterable[str] = None):
        """Fetch several projects by hash ID and serialize them as a mapping,
        reporting any hash IDs known to be missing."""
        projects, total = self.get_nondeleted_projects(Projects.hash_id.in_(hash_ids),
                                                       require_hash_ids=hash_ids)
        self.check_project_permissions(projects, user, ['readable'])

        returned_projects = {project.hash_id: project for project in projects}

        return jsonify(MultipleProjectResponseSchema(context={
            'user_privilege_filter': user.id,
        }).dump(dict(
            mapping=returned_projects,
            missing=list(missing_hash_ids) if missing_hash_ids else [],
        )))

    def update_projects(self, hash_ids: List[str], params: Dict, user: AppUser):
        """Apply name/description updates to the given projects and return the
        set of hash IDs that could not be found."""
        changed_fields = set()

        projects, total = self.get_nondeleted_projects(Projects.hash_id.in_(hash_ids))
        # NOTE(review): mutation is guarded only by 'readable' — confirm
        # whether this should require 'writable' instead.
        self.check_project_permissions(projects, user, ['readable'])

        missing_hash_ids = self.get_missing_hash_ids(hash_ids, projects)

        for project in projects:
            for field in ('name', 'description'):
                if field in params:
                    if getattr(project, field) != params[field]:
                        setattr(project, field, params[field])
                        changed_fields.add(field)

        if len(changed_fields):
            try:
                db.session.commit()
            except IntegrityError as e:
                # BUGFIX: roll back the failed transaction and chain the
                # original cause instead of discarding it.
                db.session.rollback()
                raise ValidationError("The project name is already taken.") from e

        return missing_hash_ids

    def get_missing_hash_ids(self, expected_hash_ids: Iterable[str],
                             files: Iterable[Projects]):
        """Return the subset of expected_hash_ids absent from *files*."""
        found_hash_ids = set(file.hash_id for file in files)
        return {hash_id for hash_id in expected_hash_ids
                if hash_id not in found_hash_ids}
class ProjectListView(ProjectBaseView):
    """Endpoints for listing, creating, and bulk-updating projects."""
    decorators = [auth.login_required]

    @use_args(ProjectListRequestSchema)
    @use_args(PaginatedRequestSchema)
    def get(self, params, pagination: Pagination):
        """Endpoint to fetch a list of projects accessible by the user."""
        current_user = g.current_user

        projects, total = self.get_nondeleted_projects(
            None, accessible_only=True,
            sort=params['sort'], pagination=pagination,
        )
        # Not necessary (due to accessible_only=True), but check anyway
        self.check_project_permissions(projects, current_user, ['readable'])

        return jsonify(ProjectListSchema(context={
            'user_privilege_filter': g.current_user.id,
        }).dump({
            'total': total,
            'results': projects,
        }))

    @use_args(ProjectCreateSchema)
    def post(self, params):
        """Endpoint to create a project."""
        current_user = g.current_user
        project_service = get_projects_service()

        project = Projects()
        project.name = params['name']
        project.description = params['description']
        project.creator = current_user

        try:
            db.session.begin_nested()
            project_service.create_project_uncommitted(current_user, project)
            db.session.commit()  # releases the SAVEPOINT from begin_nested()
            db.session.flush()
        except IntegrityError:
            db.session.rollback()
            # BUG FIX: message previously read "The project name already is
            # already taken."; reworded to match the update path's wording.
            raise ValidationError('The project name is already taken.', 'name')
        db.session.commit()

        return self.get_project_response(project.hash_id, current_user)

    @use_args(lambda request: BulkProjectRequestSchema())
    @use_args(lambda request: BulkProjectUpdateRequestSchema(partial=True))
    def patch(self, targets, params):
        """Project update endpoint (bulk)."""
        current_user = g.current_user
        missing_hash_ids = self.update_projects(targets['hash_ids'], params, current_user)
        return self.get_bulk_project_response(targets['hash_ids'], current_user,
                                              missing_hash_ids=missing_hash_ids)
class ProjectSearchView(ProjectBaseView):
    """Endpoint for searching projects by exact name."""
    decorators = [auth.login_required]

    @use_args(ProjectSearchRequestSchema)
    @use_args(PaginatedRequestSchema)
    def post(self, params: dict, pagination: Pagination):
        """Endpoint to search for projects that match certain criteria."""
        user = g.current_user

        matches, match_count = self.get_nondeleted_projects(
            Projects.name == params['name'],
            accessible_only=True,
            sort=params['sort'],
            pagination=pagination,
        )
        # Redundant given accessible_only=True, but double-check permissions.
        self.check_project_permissions(matches, user, ['readable'])

        schema = ProjectListSchema(context={
            'user_privilege_filter': g.current_user.id,
        })
        payload = {'total': match_count, 'results': matches}
        return jsonify(schema.dump(payload))
class ProjectDetailView(ProjectBaseView):
    """Endpoints operating on a single project identified by hash ID."""
    decorators = [auth.login_required]

    def get(self, hash_id: str):
        """Endpoint to fetch a project by hash ID."""
        return self.get_project_response(hash_id, g.current_user)

    @use_args(lambda request: ProjectUpdateRequestSchema(partial=True))
    def patch(self, params: dict, hash_id: str):
        """Update a single project, then return its refreshed representation."""
        self.update_projects([hash_id], params, g.current_user)
        return self.get(hash_id)
class ProjectCollaboratorsListView(ProjectBaseView):
decorators = [auth.login_required]
    def get_bulk_collaborator_response(self, hash_id, pagination: Pagination):
        """
        Generate a paginated list of collaborators for a project.

        Requires the current user to have 'administrable' access on the
        project.
        :param hash_id: the hash ID of the project
        :param pagination: page/limit parameters
        :return: a Flask JSON response of (user, role_name) pairs
        """
        current_user = g.current_user

        project = self.get_nondeleted_project(Projects.hash_id == hash_id)
        self.check_project_permissions([project], current_user, ['administrable'])

        # Join each collaborating user to the name of their role within
        # this project via the association table.
        query = db.session.query(AppUser, AppRole.name) \
            .join(projects_collaborator_role,
                  AppUser.id == projects_collaborator_role.c.appuser_id) \
            .join(AppRole, AppRole.id == projects_collaborator_role.c.app_role_id) \
            .filter(projects_collaborator_role.c.projects_id == project.id)

        paginated_result = query.paginate(pagination.page, pagination.limit, False)

        return jsonify(ProjectCollaboratorListSchema().dump({
            'results': [{
                'user': item[0],
                'role_name': item[1],
            } for item in paginated_result.items]
        }))
    @use_args(PaginatedRequestSchema)
    def get(self, pagination: Pagination, hash_id):
        """Endpoint to fetch a list of collaborators for a project.

        :param pagination: parsed page/limit arguments
        :param hash_id: the hash ID of the project
        """
        return self.get_bulk_collaborator_response(hash_id, pagination)
@use_args(ProjectMultiCollaboratorUpdateRequest)
def post(self, params, hash_id):
proj_service = get_projects_service()
current_user = g.current_user
private_data_access = get_authorization_service().has_role(
current_user, 'private-data-access'
)
project = self.get_nondeleted_project(Projects.hash_id == hash_id)
self.check_project_permissions([project], current_user, ['administrable'])
user_hash_ids = set([item['user_hash_id'] for item in params['update_or_create']] +
params['remove_user_hash_ids'])
role_names = set([item['role_name'] for item in params['update_or_create']])
target_users = db.session.query(AppUser) \
.filter(AppUser.hash_id.in_(user_hash_ids)) \
.options(raiseload('*')) \
.all()
roles = db.session.query(AppRole) \
.filter(AppRole.name.in_(role_names)) \
.options(raiseload('*')) \
.all()
if len(target_users) != len(user_hash_ids):
raise ValidationError(f"One or more specified users does not exist.")
if len(roles) != len(role_names):
raise ValidationError(f"One or more specified roles does not exist.")
user_map = {}
for user in target_users:
user_map[user.hash_id] = user
role_map = {}
for role in roles:
role_map[role.name] = role
| |
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: menu.py
# Purpose:
#
# Author: wukan
#
# Created: 2019-01-16
# Copyright: (c) wukan 2019
# Licence: GPL-3.0
#-------------------------------------------------------------------------------
import tkinter as tk
from tkinter import ttk
import noval.consts as consts
from noval import _,GetApp
import collections
from noval.binds import *
import noval.util.utils as utils
import copy
import os
import noval.misc as misc
import json
import noval.constants as constants
#MenuItem = collections.namedtuple("MenuItem", ["id","label","accelerator","image","tester"])
class MenuItem:
    """Plain record describing a single menu entry.

    Attributes:
        id: menu command id.
        label: display text.
        accelerator: shortcut string, or None.
        image: icon image, or None.
        tester: enable-state callback, or None.
    """

    def __init__(self, id_, label, accelerator, image, tester):
        self.id, self.label = id_, label
        self.accelerator = accelerator
        self.image, self.tester = image, tester
class KeyBinder(object):
    """Class for managing keybinding configurations."""
    cprofile = None                           # Current profile name string
    key_binds = copy.copy(DEFAULT_KEY_BINDS)  # Active profile (dict: menu id -> accelerator)
    KEY_BINDING_FILE = "keybinding.json"

    def __init__(self):
        """Create the KeyBinder object"""
        object.__init__(self)
        # Attributes
        self.cache = None  # cache directory path, resolved lazily by GetCachedir()

    def LoadCacheKeybinds(self):
        """Load the user-customized shortcut configuration from the cache file."""
        self.GetCachedir()
        key_binding_file = os.path.join(self.cache, self.KEY_BINDING_FILE)
        if os.path.exists(key_binding_file):
            try:
                with open(key_binding_file) as f:
                    data = json.load(f)
                binds = {}
                for key in data:
                    binds[getattr(constants, key)] = data[key]
                # Replace the default shortcut configuration.
                KeyBinder.key_binds = binds
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                utils.get_logger().exception("")

    def GetCachedir(self):
        """Return (and lazily compute) the user cache directory path."""
        if self.cache is None:
            self.cache = os.path.join(utils.get_user_data_path(), consts.USER_CACHE_DIR)
        return self.cache

    @classmethod
    def CheckKeybindsConflict(cls):
        """Raise RuntimeError when two menu ids share the same accelerator."""
        # BUG FIX: the original aliased the list (``temp_list = accels``) and
        # removed elements from it while iterating over it, mutating the
        # sequence being iterated and potentially skipping entries. Counting
        # occurrences detects the same conflicts without mutation.
        accels = list(cls.key_binds.values())
        for accel in accels:
            if accels.count(accel) > 1:
                raise RuntimeError("accelerator %s is conflicted...." % accel)

    def GetBinding(self, item_id, accelerator=None):
        """
        Get the accelerator bound to a menu id and convert it into the key
        sequence tk understands internally (e.g. Ctrl+O -> <Control-o>).
        @param item_id: Menu Item Id
        @param accelerator: optional explicit accelerator overriding the config
        @return: (accelerator, sequence) strings, or (None, None)
        """
        if accelerator is None:
            # No user-specified accelerator: look one up in the config dict.
            accelerator = self.GetRawBinding(item_id)
        if accelerator is not None:
            sequence = misc.accelerator_to_sequence(accelerator)
            return accelerator, sequence
        return None, None

    @classmethod
    def GetCurrentProfile(cls):
        """Get the name of the currently set key profile if one exists
        @return: string or None
        """
        return cls.cprofile

    @classmethod
    def GetCurrentProfileDict(cls):
        """Get the dictionary of keybindings
        @return: dict
        NOTE(review): ``keyprofile`` is only created by LoadDefaults() or a
        profile load; calling this before that raises AttributeError.
        """
        return cls.keyprofile

    @staticmethod
    def GetKeyProfiles():
        """Get the list of available key profiles
        @return: list of strings
        NOTE(review): ``util`` is not imported in this module (legacy Editra
        code path) -- this raises NameError if actually called; confirm
        whether this method is still reachable.
        """
        recs = util.GetResourceFiles(u'cache', trim=True, get_all=False,
                                     suffix='.ekeys', title=False)
        if recs == -1:
            recs = list()
        tmp = util.GetResourceFiles(u'ekeys', True, True, '.ekeys', False)
        if tmp != -1:
            recs.extend(tmp)
        return recs

    def GetProfilePath(self, pname):
        """Get the full path to the given keyprofile
        @param pname: profile name
        @return: string or None
        @note: expects unique name for each profile in the case that
               a name exists in both the user and system paths the one
               found on the user path will be returned.
        NOTE(review): depends on ``ed_glob`` which is not imported in this
        module (legacy Editra code path).
        """
        if pname is None:
            return None

        rname = None
        for rec in self.GetKeyProfiles():
            if rec.lower() == pname.lower():
                rname = rec
                break

        # Must be a new profile
        if rname is None:
            rname = pname

        kprof = u"%s%s.ekeys" % (ed_glob.CONFIG['CACHE_DIR'], rname)
        if not os.path.exists(kprof):
            # Must be a system supplied keyprofile
            rname = u"%s%s.ekeys" % (ed_glob.CONFIG['KEYPROF_DIR'], rname)
            if not os.path.exists(rname):
                # Doesn't exist at syspath either so instead assume it is a new
                # custom user defined key profile.
                rname = kprof
        else:
            rname = kprof
        return rname

    @classmethod
    def GetRawBinding(cls, item_id):
        """Get the raw key binding for a menu id
        @param item_id: MenuItem Id
        @return: configured binding, or None when unbound
        """
        return cls.key_binds.get(item_id, None)

    @classmethod
    def FindMenuId(cls, keyb):
        """Find the menu item ID that the
        keybinding is currently associated with.
        @param keyb: tuple of unicode (u'Ctrl', u'C')
        @return: int (-1 if not found)
        """
        menu_id = -1
        # BUG FIX: dict.iteritems() does not exist on Python 3; use items().
        for key, val in cls.keyprofile.items():
            if val == keyb:
                menu_id = key
                break
        return menu_id

    @classmethod
    def LoadDefaults(cls):
        """Load the default key profile
        NOTE(review): ``_DEFAULT_BINDING`` is not defined in this module --
        confirm where it is expected to come from before calling.
        """
        cls.keyprofile = dict(_DEFAULT_BINDING)
        cls.cprofile = None

    def LoadKeyProfile(self, pname):
        """Load a key profile from profile directory into the binder
        by name.
        @param pname: name of key profile to load
        """
        if pname is None:
            ppath = None
        else:
            ppath = self.GetProfilePath(pname)
        self.LoadKeyProfileFile(ppath)

    def LoadKeyProfileFile(self, path):
        """Load a key profile from the given path
        @param path: full path to file
        """
        keydict = dict()
        pname = None
        if path:
            pname = os.path.basename(path)
            pname = pname.rsplit('.', 1)[0]

        if pname is not None and os.path.exists(path):
            reader = util.GetFileReader(path)
            if reader != -1:
                util.Log("[keybinder][info] Loading KeyProfile: %s" % path)
                for line in reader:
                    parts = line.split(u'=', 1)
                    # Check that the line was formatted properly
                    if len(parts) == 2:
                        # Try to find the ID value
                        item_id = _GetValueFromStr(parts[0])
                        if item_id is not None:
                            tmp = [part.strip()
                                   for part in parts[1].split(u'+')
                                   if len(part.strip())]

                            # Do some checking if the binding is valid
                            nctrl = len([key for key in tmp
                                         if key not in (u'Ctrl', u'Alt', u'Shift')])
                            if nctrl:
                                if parts[1].strip().endswith(u'++'):
                                    tmp.append(u'+')
                                kb = tuple(tmp)
                                if kb in keydict.values():
                                    # BUG FIX: Python 3 dicts have no
                                    # iteritems() and forbid deletion while
                                    # iterating; snapshot the items first.
                                    for mid, bound in list(keydict.items()):
                                        if kb == bound:
                                            del keydict[mid]
                                            break
                                keydict[item_id] = tuple(tmp)
                            else:
                                # Invalid key binding
                                continue
                reader.close()
                KeyBinder.keyprofile = keydict
                KeyBinder.cprofile = pname
                return
            else:
                util.Log("[keybinder][err] Couldn't read %s" % path)
        elif pname is not None:
            # Fallback to default keybindings
            util.Log("[keybinder][err] Failed to load bindings from %s" % pname)

        util.Log("[keybinder][info] Loading Default Keybindings")
        KeyBinder.LoadDefaults()

    def SaveKeyProfile(self):
        """Save the current key profile to disk"""
        if KeyBinder.cprofile is None:
            util.Log("[keybinder][warn] No keyprofile is set, cant save")
        else:
            ppath = self.GetProfilePath(KeyBinder.cprofile)
            writer = util.GetFileWriter(ppath)
            if writer != -1:
                itemlst = list()
                for item in KeyBinder.keyprofile.keys():
                    # NOTE(review): GetBinding() in this module returns an
                    # (accelerator, sequence) tuple, which has no .lstrip();
                    # this looks like a leftover from the legacy string API --
                    # verify before relying on profile saving.
                    itemlst.append(u"%s=%s%s" % (_FindStringRep(item),
                                                 self.GetBinding(item).lstrip(),
                                                 os.linesep))
                writer.writelines(sorted(itemlst))
                writer.close()
            else:
                util.Log("[keybinder][err] Failed to open %s for writing" % ppath)

    @classmethod
    def SetBinding(cls, item_id, keys):
        """Set the keybinding of a menu id
        @param item_id: item to set
        @param keys: string or list of key strings ['Ctrl', 'S']
        """
        # BUG FIX: ``basestring`` does not exist on Python 3; use str.
        if isinstance(keys, str):
            keys = [key.strip() for key in keys.split(u'+')
                    if len(key.strip())]

        keys = tuple(keys)
        if len(keys):
            # Check for an existing binding
            menu_id = cls.FindMenuId(keys)
            if menu_id != -1:
                del cls.keyprofile[menu_id]
            # Set the binding
            cls.keyprofile[item_id] = keys
        elif item_id in cls.keyprofile:
            # Clear the binding
            del cls.keyprofile[item_id]
        else:
            pass

    @classmethod
    def SetProfileName(cls, pname):
        """Set the name of the current profile
        @param pname: name to set profile to
        """
        cls.cprofile = pname

    @classmethod
    def SetProfileDict(cls, keyprofile):
        """Set the keyprofile using a dictionary of id => bindings
        @param keyprofile: { menu_id : (u'Ctrl', u'C'), }
        """
        cls.keyprofile = keyprofile
class PopupMenu(tk.Menu):
"""Custom wxMenu class that makes it easier to customize and access items.
"""
def __init__(self,master=None,**kw):
"""Initialize a Menu Object
@param title: menu title string
@param style: type of menu to create
"""
tk.Menu.__init__(self,master=master,tearoff=False,**kw)
self.images = []
self._items = []
self._submenus = []
    @property
    def SubMenus(self):
        # List of (menu_id, menu) tuples registered via AppendMenu/InsertMenu.
        return self._submenus
    def GetMenuData(self,id_,text,handler,img,accelerator,kind,variable,tester):
        """Build a MenuItem record plus the kwargs for the tk add_* call."""
        text = MenubarMixin.FormatMenuName(text)
        menu_item = MenuItem(id_,text,accelerator,img,tester)
        kwargs = dict(label=text,command=handler)
        if img is not None:
            # Place the image to the left of the label text.
            kwargs.update(dict(image=img,compound=tk.LEFT))
            # tkinter does not keep a reference to images; store it in this
            # list so the image is not garbage collected.
            self.images.append(img)
        if accelerator is not None:
            kwargs.update(dict(accelerator=accelerator))
        if kind == consts.CHECK_MENU_ITEM_KIND or kind == consts.RADIO_MENU_ITEM_KIND:
            # Attach the variable backing a check/radio item; clicking the
            # item updates the variable's value.
            if variable is not None:
                kwargs.update(dict(variable = variable))
        return menu_item,kwargs
def Append(self, id_, text, helpstr=u'', handler=None,img=None,accelerator=None,\
kind=consts.NORMAL_MENU_ITEM_KIND,variable=None,tester=None,**extra_args):
"""Append a MenuItem
@param id_: New MenuItem ID
@keyword text: Menu Label
@keyword helpstr: Help String
@keyword kind: MenuItem type
@keyword use_bmp: try and set a bitmap if an appropriate one is
available in the ArtProvider
"""
menu_item,kwargs = self.GetMenuData(id_,text,handler,img,accelerator,kind,variable,tester)
kwargs.update(extra_args)
self._items.append(menu_item)
if kind == consts.NORMAL_MENU_ITEM_KIND:
self.add_command(**kwargs)
elif kind == consts.CHECK_MENU_ITEM_KIND:
self.add_checkbutton(**kwargs)
elif kind == consts.RADIO_MENU_ITEM_KIND:
self.add_radiobutton(**kwargs)
def AppendMenuItem(self, item,handler=None,**kwargs):
"""Appends a MenuItem to the menu and adds an associated
bitmap if one is available, unless use_bmp is set to false.
@param item: wx.MenuItem
@keyword use_bmp: try and set a bitmap if an appropriate one is
available in the ArtProvider
"""
#用户如果指定快捷键使用用户指定的快捷键
if 'accelerator' not in kwargs:
accelerator=item.accelerator
else:
accelerator=kwargs.get('accelerator')
#用户如果指定tester函数使用用户指定的tester函数
if 'tester' not in kwargs:
tester=item.tester
else:
tester=kwargs.get('tester')
#用户如果指定图标使用用户指定的图标
if 'image' not in kwargs:
image=item.image
else:
image=kwargs.get('image')
self.Append(item.id,item.label,handler=handler,img = image,accelerator=accelerator,tester=tester)
def AppendMenu(self,id_, text,menu):
text = MenubarMixin.FormatMenuName(text)
self._submenus.append((id_,menu))
menu_menu_item = MenuItem(id_,text,None,None,None)
self._items.append(menu_menu_item)
self.add_cascade(label= text, menu=menu)
def InsertMenu(self,pos,id_, text,menu):
text = MenubarMixin.FormatMenuName(text)
self._submenus.append((id_,menu))
menu_menu_item = MenuItem(id_,text,None,None,None)
self._items.insert(pos,menu_menu_item)
self.insert(
pos,
"cascade",
label= text,
menu=menu
)
return menu_menu_item
def InsertMenuAfter(self,item_id,id_, text,menu):
'''
插入某个子菜单
'''
pos = -1
for i,menu_item in enumerate(self._items):
if menu_item.id == item_id:
pos = i
break
if pos >-1:
mitem = self.InsertMenu(pos + 1, id_, text, menu)
else:
mitem = self.AppendMenu(id_, text,menu)
return mitem
def GetMenu(self,id_):
for menu in self._submenus:
if menu[0] == id_:
return menu[1]
return None
def | |
converted and there now can be duplicates (same _id)
# (ex: mygene, ensembl -> entrez conversion). "docs" could produce a duplicated error
# within the batch, so we need to remove duplicates.
all_ids = [d["_id"] for d in docs]
uniq_ids = set(all_ids)
if len(all_ids) != len(uniq_ids):
logging.warning("Found duplicated IDs within batch, trying to fix")
docs = fix_batch_duplicates(docs)
if merger == "merge_struct":
stored_docs = dest.mget_from_ids([d["_id"] for d in docs])
ddocs = dict([(d["_id"], d) for d in docs])
for d in stored_docs:
ddocs[d["_id"]] = merge_struct(d, ddocs[d["_id"]])
docs = list(ddocs.values())
cnt = dest.update(docs, upsert=upsert)
return cnt
except Exception as e:
logger_name = "build_%s_%s_batch_%s" % (dest_name, col_name, batch_num)
logger, _ = get_logger(logger_name, btconfig.LOG_FOLDER)
logger.exception(e)
logger.error("col_name: %s, dest_name: %s, ids: see pickle, " % (col_name, dest_name)
+ "mapper: %s, cleaner: %s, upsert: %s, " % (mapper, cleaner, upsert)
+ "merger: %s, batch_num: %s" % (merger, batch_num))
exc_fn = os.path.join(btconfig.LOG_FOLDER, "%s.exc.pick" % logger_name)
pickle.dump(e, open(exc_fn, "wb"))
logger.info("Exception was dumped in pickle file '%s'" % exc_fn)
ids_fn = os.path.join(btconfig.LOG_FOLDER, "%s.ids.pick" % logger_name)
pickle.dump(ids, open(ids_fn, "wb"))
logger.info("IDs dumped in pickle file '%s'" % ids_fn)
dat_fn = os.path.join(btconfig.LOG_FOLDER,
"%s.docs.pick" % logger_name)
pickle.dump(docs, open(dat_fn, "wb"))
logger.info("Data (batch of docs) dumped in pickle file '%s'" % dat_fn)
raise
def set_pending_to_build(conf_name=None):
    """Flag one (or all) build configurations as pending a build.

    @param conf_name: configuration _id to flag; None flags all of them.
    """
    src_build_config = get_src_build_config()
    qfilter = {}
    if conf_name:
        qfilter = {"_id": conf_name}
    # BUG FIX: corrected the "configuraitons" typo in the log message and
    # replaced the fragile ``x and x or y`` idiom with ``x or y``.
    logging.info("Setting pending_to_build flag for configuration(s): %s" %
                 (conf_name or "all configurations"))
    src_build_config.update(qfilter, {"$addToSet": {"pending": "build"}})
class BuilderManager(BaseManager):
    def __init__(self,
                 source_backend_factory=None,
                 target_backend_factory=None,
                 builder_class=None,
                 poll_schedule=None,
                 *args,
                 **kwargs):
        """
        BuilderManager deals with the different builders used to merge datasources.
        It is connected to src_build() via sync(), where it grabs build information
        and registers builder classes, ready to be instantiated when triggering builds.
        source_backend_factory can be an optional factory function (like a partial)
        that builders can call without any argument to generate a SourceBackend.
        Same for target_backend_factory for the TargetBackend. builder_class, if
        given, will be used as the actual Builder class used for the merge and will
        be passed the same arguments as the base DataBuilder. It can also be a list
        of classes, in which case the default used one is the first, when it's
        necessary to define multiple builders.
        """
        super(BuilderManager, self).__init__(*args, **kwargs)
        self.src_build_config = get_src_build_config()
        self.source_backend_factory = source_backend_factory
        self.target_backend_factory = target_backend_factory
        # Normalize builder_class into a list; the first entry is the default.
        builder_class = builder_class or DataBuilder
        if isinstance(builder_class, list):
            self.arg_builder_classes = builder_class
        else:
            self.arg_builder_classes = [builder_class]
        self.default_builder_class = self.arg_builder_classes[0] or DataBuilder
        # Populated by find_builder_classes(): classpath -> metadata dict.
        self.builder_classes = {}
        self.poll_schedule = poll_schedule
        self.setup_log()
def clean_stale_status(self):
src_build = get_src_build()
for build in src_build.find():
dirty = False
for job in build.get("jobs", []):
if job.get("status") == "building":
logging.warning(
"Found stale build '%s', marking build status as 'canceled'"
% build["_id"])
job["status"] = "canceled"
dirty = True
if dirty:
src_build.replace_one({"_id": build["_id"]}, build)
@property
def source_backend(self):
source_backend = self.source_backend_factory and self.source_backend_factory() or \
partial(SourceDocMongoBackend,
build_config=partial(get_src_build_config),
build=partial(get_src_build),
master=partial(get_src_master),
dump=partial(get_src_dump),
sources=partial(mongo.get_src_db))
return source_backend
@property
def target_backend(self):
target_backend = self.target_backend_factory and self.target_backend_factory() or \
partial(TargetDocMongoBackend,
target_db=partial(mongo.get_target_db))
return target_backend
    def get_builder_class(self, build_config_name):
        """
        builder class can be specified different way (in order):
        1. within the build_config document (so, per configuration)
        2. or defined in the builder manager (so, per manager)
        3. or default to DataBuilder
        """
        builder_class = None
        # NOTE(review): find_one() returns None for an unknown config name,
        # which makes the conf.get() below raise AttributeError -- confirm
        # callers always pass a known configuration name.
        conf = self.src_build_config.find_one({"_id": build_config_name})
        if conf.get("builder_class"):
            builder_class = self.builder_classes[
                conf["builder_class"]]["class"]
        elif self.default_builder_class:
            builder_class = self.default_builder_class
        else:
            builder_class = DataBuilder
        return builder_class
    def register_builder(self, build_name):
        """Register a lazy factory creating a builder for ``build_name``."""
        # will use partial to postpone object creations and their db connection
        # as we don't want to keep connection alive for undetermined amount of time
        # declare source backend
        def create(build_name):
            # postpone config import so app had time to set it up
            # before actual call time
            from biothings import config
            # assemble the whole
            klass = self.get_builder_class(build_name)
            self.logger.info("Build config '%s' will use builder class %s",
                             build_name, klass)
            bdr = klass(build_name,
                        source_backend=self.source_backend,
                        target_backend=self.target_backend,
                        log_folder=config.LOG_FOLDER)
            return bdr

        self.register[build_name] = partial(create, build_name)
def get_builder(self, col_name):
doc = get_src_build().find_one({"_id": col_name})
if not doc:
raise BuilderException("No such build named '%s'" % repr(col_name))
assert "build_config" in doc, "Expecting build_config information"
klass = self.get_builder_class(doc["build_config"]["name"])
bdr = klass(doc["build_config"]["name"],
source_backend=self.source_backend,
target_backend=self.target_backend,
log_folder=btconfig.LOG_FOLDER)
# overwrite with existing values
bdr.build_config = doc["build_config"]
bdr.target_backend.set_target_name(col_name)
return bdr
def delete_merged_data(self, merge_name):
target_db = mongo.get_target_db()
col = target_db[merge_name]
col.drop()
    def delete_merge(self, merge_name):
        """Delete merged collections and associated metadata"""
        db = get_src_build()
        meta = db.find_one({"_id": merge_name})
        if meta:
            # NOTE(review): ``remove`` is the legacy pymongo collection API
            # (deprecated in favor of delete_one/delete_many) -- confirm the
            # backend wrapper still provides it.
            db.remove({"_id": merge_name})
        else:
            self.logger.warning(
                "No metadata found for merged collection '%s'" % merge_name)
        self.delete_merged_data(merge_name)
    def archive_merge(self, merge_name):
        """Mark a merge as archived (metadata kept) and drop its merged data.

        Unlike delete_merge(), the metadata document is preserved with an
        ``archived`` timestamp; the data collection itself is still dropped.
        (The previous docstring was copy-pasted from delete_merge.)
        """
        db = get_src_build()
        meta = db.find_one({"_id": merge_name})
        if meta:
            meta["archived"] = datetime.now()
            db.replace_one({"_id": merge_name}, meta)
        else:
            self.logger.warning(
                "No metadata found for merged collection '%s'" % merge_name)
        self.delete_merged_data(merge_name)
def get_query_for_list_merge(self, only_archived):
q = {"archived": {"$exists": 0}}
if only_archived:
q = {"archived": {"$exists": 1}}
return q
def list_merge(self, build_config=None, only_archived=False):
q = self.get_query_for_list_merge(only_archived)
docs = get_src_build().find(q)
by_confs = {}
for d in docs:
by_confs.setdefault(
d.get("build_config", {}).get("name", None),
[]).append(d["_id"])
if build_config:
return sorted(by_confs.get(build_config, []))
else:
for conf in by_confs:
by_confs[conf] = sorted(by_confs[conf])
return by_confs
    def setup_log(self):
        # Attach the shared "buildmanager" logger and remember its logfile path.
        self.logger, self.logfile = get_logger("buildmanager")
    def __getitem__(self, build_name):
        """
        Return an instance of a builder for the build named 'build_name'
        Note: each call returns a different instance (factory call behind the scene...)
        """
        # we'll get a partial class but will return an instance
        pclass = BaseManager.__getitem__(self, build_name)
        return pclass()
def configure(self):
"""Sync with src_build_config and register all build config"""
self.register = {}
self.builder_classes = {}
for conf in self.src_build_config.find():
self.register_builder(conf["_id"])
self.find_builder_classes()
def resolve_builder_class(self, klass):
"""
Resolve class/partial definition to (obj,"type","mod.class")
where names (class name, module, docstring, etc...) can
directly be accessed whether it's a standard class or not
"""
obj = klass
if type(klass) == partial:
assert type(klass.func) == type
btype = "partial"
obj = klass.func
elif type(klass) == type:
btype = "class"
else:
raise TypeError("Unknown type for builder %s" % repr(klass))
modstr = obj.__module__
classstr = obj.__name__
classpathstr = "%s.%s" % (modstr, classstr)
return (obj, btype, classpathstr)
    def find_builder_classes(self):
        """
        Find all available builder classes:
        1. classes passed during manager init (builder_class)
           (that includes the default builder)
        2. all subclassing DataBuilder in:
           a. biothings.hub.databuilder.*
           b. hub.databuild.* (app-specific)
        Results are stored in self.builder_classes keyed by "module.Class".
        """
        bclasses = set(self.arg_builder_classes)
        mods = [sys.modules[__name__]]
        try:
            # App-specific builder module is optional.
            import hub.databuild as m
            mods.append(m)
        except ImportError:
            pass
        for klass in find_classes_subclassing(mods, DataBuilder):
            bclasses.add(klass)
        for klass in bclasses:
            try:
                obj, btype, classpathstr = self.resolve_builder_class(klass)
                # Collapse the docstring into a single line for display.
                helpstr = obj.__doc__ and " ".join(
                    map(str.strip, obj.__doc__.splitlines()))
                self.builder_classes[classpathstr] = {
                    "desc": helpstr,
                    "type": btype,
                    "class": klass,
                    "default": klass == self.default_builder_class,
                }
            except Exception as e:
                logging.exception(
                    "Can't extract information from builder class %s: %s" %
                    (repr(klass), e))
    def merge(self, build_name, sources=None, target_name=None, **kwargs):
        """
        Trigger a merge for build named 'build_name'. Optional list of sources can be
        passed (one single or a list). target_name is the target collection name used
        to store to merge data. If none, each call will generate a unique target_name.

        Raises BuilderException when no builder is registered under
        'build_name' or when a required datasource isn't ready.
        """
        try:
            bdr = self[build_name]
            job = bdr.merge(sources,
                            target_name,
                            job_manager=self.job_manager,
                            **kwargs)
            return job
        except KeyError:
            raise BuilderException("No such builder for '%s'" % build_name)
        except ResourceNotReady as e:
            raise BuilderException(
                "Some datasources aren't ready for the merge: %s" % e)
def list_sources(self, build_name):
"""
List all registered sources used to trigger a build named 'build_name'
"""
info = self.src_build_config.find_one({"_id": build_name})
return info and info["sources"] or []
def whatsnew(self, build_name=None, old=None):
"""
Return datasources which have changed since last time
(last time is datasource information from metadata, either from
given old src_build doc name, or the latest found if old=None)
"""
dbbuild = get_src_build()
dbdump = get_src_dump()
def whatsnewcomparedto(build_name, old=None):
if old is None:
# TODO: this will get big... but needs to be generic
# because we handle different hub db backends (or it needs to be a
# specific helper func to be defined all backends
# FIXME: this gets slower as hub gets more builds, we are
# finding all builds of all build configs when /whatsnew gets
# requested
builds = dbbuild.find({"build_config.name": build_name})
builds = sorted(builds, key=lambda e: e["started_at"])
if builds:
old = builds[-1]
else:
raise BuilderException(
"Can't find a build associated to config '%s'" | |
line = word
word_width = f.size(word)[0]
line_width = word_width
# Some part of a line might be left.
if line:
new_lines.append(line)
line = ""
line_width = 0
else:
# A blank line is being added to old_text.
new_lines = list(old_text)
new_lines.append("")
# Check if height is calculated:
if return_height:
# Check if line_height needs to be set:
if not line_height:
line_height = f.get_linesize()
height = height_of_strings(new_lines, f, line_height)
return new_lines, height
return new_lines
def string_to_screens_and_lines(source, allowed_width, allowed_height, f, pixels_between_lines = None, end_screens_with = (), do_not_include = ()):
    """
    Convert a string to screens and lines.
    Pygame does not allow line breaks ("\n") when rendering text. The purpose
    of this function is to break a string into lines and screens given a font
    and screen dimensions.
    The following two assumptions are made:
        1. Line breaks ("\n") in source denote the start of a new paragraph.
           Therefore, to have an actual blank line (i.e., an empty string)
           appear in the returned array, add another "\n" immediately
           following the first.
        2. Spaces denote the end of a word.
    Parameters:
        source: the string to divide into screens and lines.
        allowed_width: the width, in pixels, permitted for lines; can be a
            number of pixels or a proportion of the active screen's width.
        allowed_height: same as allowed_width but for the height of a single
            screen.
        f: the font with which source is measured.
    Keyword Parameters:
        pixels_between_lines: blank pixel rows between lines of text; defaults
            to None, in which case it is obtained from f.
        end_screens_with: a restricted set of characters that may end a
            screen; defaults to an empty tuple, in which case any character
            ending a word can end a screen.
        do_not_include: words that are exceptions to the end_screens_with
            words (e.g., "Mrs." ends in a period but should not end a screen)
    Returns:
        screens: a multidimensional list of screens and lines.
    """
    # Check if allowed_height and allowed_width need to be set:
    if 0 < allowed_width <= 1 and 0 < allowed_height <= 1:
        allowed_width, allowed_height = screen_dimensions()
    elif 0 < allowed_width <= 1 or 0 < allowed_height <= 1:
        # Implicit string concatenation keeps the message free of the
        # source indentation that a backslash continuation would embed.
        raise ValueError("Both or neither of allowed_width and "
                         "allowed_height can be between 0 and 1.")
    # Check if pixels_between_lines needs to be set:
    if not pixels_between_lines:
        pixels_between_lines = f.get_linesize()
    else:
        assert pixels_between_lines > 0, ("pixels_between_lines must be "
                                          "positive.")
    # Make sure that allowed_height can accommodate the tallest word in
    # source:
    assert f.size(source)[1] <= allowed_height, ("allowed_height cannot "
                                                 "accommodate source.")
    screens = []
    # Break source into paragraphs and paragraphs into single words:
    paragraphs = source.split("\n")
    single_words = []
    for paragraph in paragraphs:
        individual_words = paragraph.split(" ")
        # While here, verify that the longest word fits:
        widest_word, pixels = longest_string_to_render(individual_words, f)
        assert pixels < allowed_width, ("{:s} in source is too long for "
                                        "allowed_width.".format(widest_word))
        single_words.append(individual_words)
    # The function branches next, depending on whether restrictions have been
    # placed on where screen breaks can occur.
    if not end_screens_with:
        # Screen breaks can occur following any word.
        # Break single_words into lines without regard to screens:
        lines_of_text, total_height = wrap_text(
            single_words,
            allowed_width,
            f,
            return_height = True,
            line_height = pixels_between_lines
        )
        if total_height <= allowed_height:
            # Everything fits on one screen.
            screens.append(lines_of_text)
        else:
            # There will be at least two screens.
            # Initialize the first screen and a height counter:
            screen = []
            screen_height = 0
            for line in lines_of_text:
                line_height = f.size(line)[1]
                screen_height = screen_height+line_height+pixels_between_lines
                if screen_height < allowed_height:
                    # line fits on the current screen.
                    screen.append(line)
                elif screen_height == allowed_height or screen_height-pixels_between_lines < allowed_height:
                    # line fits, but no more will.
                    screen.append(line)
                    screens.append(screen)
                    screen = []
                    screen_height = 0
                else:
                    # line doesn't fit.
                    screens.append(screen)
                    screen = [line]
                    screen_height = line_height+pixels_between_lines
            # Check for a remaining screen:
            if screen:
                # Fix: removed a stray trailing backslash after this call;
                # it joined this line with the dedented "else:" below and
                # made the module unparseable.
                screens.append(screen)
    else:
        # Screens can only end following specific strings.
        # These strings do not need to be end-of-sentence characters, but it
        # is difficult to imagine what else they would be. Therefore, I refer
        # to the resulting strings as sentences, acknowledging that this may
        # be incorrect terminology.
        # Break paragraphs into sentences:
        sentences = []
        for paragraph in paragraphs:
            if sentences:
                # This is not the first line, so start the paragraph on a new
                # line:
                sentences.append("")
            if paragraph:
                # paragraph is not a blank line.
                # Break it into sentences:
                paragraph_as_sentences = text_to_sentences(
                    paragraph,
                    terminators = end_screens_with,
                    exclude = do_not_include
                )
                sentences = sentences+paragraph_as_sentences
            else:
                # paragraph is a blank line.
                sentences.append("")
        # Initialize the first screen:
        screen = []
        for sentence in sentences:
            # Determine whether sentence starts on a new line or continues
            # from the current line:
            if screen:
                # If the last line in screen is blank, then sentence starts on
                # a new line.
                last_line = screen[-1]
                if last_line:
                    next_line = False
                else:
                    next_line = True
            else:
                # This screen is blank.
                # Arbitrarily set next_line to False:
                next_line = False
            # Try adding sentence to the current screen:
            possible_screen, screen_height = wrap_text(
                sentence,
                allowed_width,
                f,
                old_text = screen,
                start_new_line = next_line,
                return_height = True,
                line_height = pixels_between_lines
            )
            if screen_height <= allowed_height:
                # Update the current screen:
                screen = possible_screen
            else:
                # This sentence does not fit.
                # If screen is currently blank, it means that sentence needs
                # to be broken across screens (i.e., it will not fit on a
                # single screen).
                if screen:
                    # This is not an issue.
                    # Save screen:
                    screens.append(screen)
                    # Initialize the next screen with sentence:
                    screen, current_height = wrap_text(
                        sentence,
                        allowed_width,
                        f,
                        return_height = True,
                        line_height = pixels_between_lines
                    )
                    if current_height > allowed_height:
                        # sentence needs to be broken across screens.
                        # This can be accomplished by calling the present
                        # function without restrictions on screen endings.
                        # However, the text currently on screen is needed too.
                        text_to_add = ""
                        for line in screen:
                            # Fix: join wrapped lines with a space (the
                            # original appended ""), otherwise the last word
                            # of one line fused with the first word of the
                            # next when re-wrapping.
                            text_to_add = text_to_add+line+" "
                        text_to_add = text_to_add+sentence
                        multiple_screens = string_to_screens_and_lines(
                            text_to_add,
                            allowed_width,
                            allowed_height,
                            f,
                            pixels_between_lines = pixels_between_lines
                        )
                        for s in multiple_screens:
                            screens.append(s)
                else:
                    # screen is empty, but sentence will not fit.
                    # Call the present function to get this sentence's
                    # screens:
                    multiple_screens = string_to_screens_and_lines(
                        sentence,
                        allowed_width,
                        allowed_height,
                        f,
                        pixels_between_lines = pixels_between_lines
                    )
                    for s in multiple_screens:
                        screens.append(s)
    # Check if a final screen needs to be added:
    if screen:
        screens.append(screen)
    return screens
def render_string(s, f, colour, background, antialiasing = True):
    """
    Render a single string with a given font and colours.
    Parameters:
        s: the string to render.
        f: the font in which to render s.
        colour: the colour of text to use, expressed as an RGB list or tuple.
        background: the background colour.
    Keyword Parameters:
        antialiasing: indicates whether text is rendered with antialiasing;
            defaults to True.
    Returns:
        surface: the pygame.Surface object.
        rect: the pygame.Rect object.
    """
    # Distinct local names avoid shadowing the string parameter.
    surface = f.render(s, antialiasing, colour, background)
    rect = surface.get_rect()
    return surface, rect
def render_lines(lines, f, text_colour, background_colour, line_size = None, use_antialiasing = True):
"""
Create pygame.Surface and pygame.Rect objects for a list of strings.
Parameters:
lines: the lines to render; "" is treated as a blank line.
f: the font in which to render text.
text_colour: an RGB list or tuple for the colour of the text.
background_colour: RGB for background.
Keyword Parameters:
line_size: the number of pixel rows between lines; defaults to None,
in which case it is set from f.
use_antialiasing: indicates whether lines are rendered with
antialiasing; defaults to True.
Returns:
surf: the pygame.Surface object.
rect: the pygame.Rect object.
"""
height = 0
surfaces = []
rects = []
for line in lines:
s, r = render_string(
line, f, text_colour, background_colour,
antialiasing = use_antialiasing
)
surfaces.append(s)
rects.append(r)
height = height+r.height
try:
height = height+line_size*(len(surfaces)-1)
except TypeError:
line_size = f.get_linesize()
height = height+line_size*(len(surfaces)-1)
# height | |
y)
if self.multimetric_:
score = score[self.refit]
return score
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score_samples(self, X):
"""Call score_samples on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``score_samples``.
.. versionadded:: 0.24
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements
of the underlying estimator.
Returns
-------
y_score : ndarray of shape (n_samples,)
"""
self._check_is_fitted('score_samples')
return self.best_estimator_.score_samples(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
def fit(self, X, y=None, *, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
#X, y, groups = indexable(X, y, groups) # todo debug
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None,
more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
if self.online_train_val_split:
can = enumerate(candidate_params)
spl = enumerate(cv.split(X, None, groups))
lst = []
for (cand_idx, parameters), (split_idx, (train, test)) in product(can, spl):
lst.append(delayed(_fit_and_score)(
clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
online_train_val_split=True,
**fit_and_score_kwargs))
out = parallel(lst)
else:
can = enumerate(candidate_params)
spl = enumerate(cv.split(X, y, groups))
lst = []
for (cand_idx, parameters), (split_idx, (train, test)) in product(can, spl):
lst.append(delayed(_fit_and_score)(
clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
online_train_val_split=False,
**fit_and_score_kwargs))
out = parallel(lst)
# out = parallel(delayed(_fit_and_score)(clone(base_estimator),
# X, y,
# train=train, test=test,
# parameters=parameters,
# split_progress=(
# split_idx,
# n_splits),
# candidate_progress=(
# cand_idx,
# n_candidates),
# **fit_and_score_kwargs)
# for (cand_idx, parameters),
# (split_idx, (train, test)) in product(
# enumerate(candidate_params),
# enumerate(cv.split(X, y, groups)))
# )
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
# For callable self.scoring, the return type is only know after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out,
all_more_results)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
# check refit_metric now for a callabe scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, np.numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if isinstance(self.best_estimator_, Pipeline):
self.best_estimator_.train()
# todo set train intervall to whole dataset
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
if isinstance(self.best_estimator_, Pipeline):
self.best_estimator_.prod()
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    Every combination in ``param_grid`` is cross-validated; the search
    results and the refitted best estimator are exposed through the
    attributes inherited from ``BaseSearchCV``.
    """
    _required_parameters = ["estimator", "param_grid"]

    @_deprecate_positional_args
    def __init__(self, estimator, param_grid, *, online_train_val_split=False,
                 scoring=None, n_jobs=None, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs',
                 error_score=np.nan, return_train_score=False):
        # All shared configuration is stored by the base class.
        super().__init__(
            estimator=estimator,
            scoring=scoring,
            online_train_val_split=online_train_val_split,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )
        self.param_grid = param_grid
        _check_param_grid(param_grid)

    def _run_search(self, evaluate_candidates):
        """Evaluate every parameter combination in ``param_grid``."""
        evaluate_candidates(ParameterGrid(self.param_grid))
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
split_progress=None, candidate_progress=None,
error_score=np.nan, online_train_val_split=False):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict | |
from __future__ import annotations
from typing import Union, Optional
import importlib
import os
import pathlib
from enum import Enum
from route import quick_invalid, write, Cause
from route import error as e
class Method(Enum):
    """HTTP request methods supported by the router.

    Members stringify to their method name and can be combined into a
    "|"-separated string with ``|``, ``&`` or ``+``
    (e.g. ``Method.GET | Method.POST`` -> ``"GET|POST"``).
    """
    GET = "GET"
    HEAD = "HEAD"
    TRACE = "TRACE"
    OPTIONS = "OPTIONS"
    CONNECT = "CONNECT"
    POST = "POST"
    PUT = "PUT"
    DELETE = "DELETE"
    PATCH = "PATCH"

    @staticmethod
    def values() -> list:
        """Return every method name as a list of strings."""
        return [ev.value for ev in Method]

    def __radd__(self, other):
        # Fix: reflected add must keep the left-hand operand first, so
        # "X" + Method.GET yields "X|GET" (the original produced "GET|X",
        # reversing the caller's order).
        return str(other) + "|" + str(self)

    def __and__(self, other):
        return str(self) + "|" + str(other)

    def __or__(self, other):
        return str(self) + "|" + str(other)

    def __str__(self):
        return self.value
class Document:
    """Documentation metadata (title, media types, example, responses,
    tags, ...) attached to an endpoint or argument for spec generation."""

    def __init__(self,
                 title: str = None,  # title or summary required
                 summary: str = None,
                 # description: str = "Description",
                 # desc: str = "Description",
                 types: Union[list, str] = "application/octet-stream",
                 example: Union[dict, str, int, float, bool, any] = None,
                 security: Optional[dict] = None,
                 responses: Optional[list] = None,
                 tags: Optional[list] = None,
                 format_type: Optional[str] = None,
                 **more_args):
        if title is None and summary is None:
            raise ValueError("Title or Summary must not be None.")
        # An explicit title wins; otherwise fall back to the summary.
        self.title = title if title is not None else summary
        # self.description = desc if description is None else description
        # Normalize a single media type string into a one-element list.
        if isinstance(types, str):
            self.types = [types]
        else:
            self.types = types
        self.example = example
        self.security = security
        self.more = more_args
        self.responses = responses if responses is not None else []
        self.tags = tags if tags is not None else []
        self.format = format_type
def http(method: str,
         require_auth: bool = True,
         args: Union[tuple, list, Argument] = (),
         docs: Optional[Document] = None):
    """Decorator factory that registers a handler as an HTTP endpoint.

    The endpoint's route path is derived from the handler's source file
    location relative to a registered base directory.  ``method`` may be a
    single method name, a "|"-separated list, or "*" for all methods.
    Registrations are appended to ``loader.signals``.

    NOTE(review): ``loader`` is not imported in the visible header —
    presumably a module-level name bound elsewhere in this file; confirm.
    """
    def _context(handler):
        path = None
        # The handler's defining file determines its route.
        file = handler.__globals__["__file__"]
        # "___" as a *directory* name is reserved for path arguments.
        if "___" in os.path.normpath(file).split(os.path.sep):
            raise IsADirectoryError("Path-argument like directory found.")
        ep_dir = os.path.dirname(file)
        # A "___.py" catch-all module must be alone in its directory.
        if file.endswith("___.py") and \
                len([name for name in os.listdir(ep_dir) if os.path.isfile(ep_dir + "/" + name)]) >= 2:
            raise FileExistsError("Endpoint conflict")
        # Resolve the file's path relative to the first matching source base.
        for base in loader.known_source:
            if os.path.abspath(file).startswith(os.path.abspath(base)):
                path = os.path.relpath(file, os.path.relpath(base))
        if path is None:
            raise FileNotFoundError("Base path not found.")
        path = path.replace(os.sep, "/")
        pp = 0  # count of declared path arguments
        # Normalize a single Argument into a one-element tuple.
        if isinstance(args, Argument):
            arg3 = (args,)
        else:
            arg3 = args
        for arg in arg3:
            if arg.arg_in == "path":
                # NOTE(review): ``path in "___"`` tests whether *path* is a
                # substring of "___" (only true for "", "_", "__", "___") —
                # ``"___" in path`` was likely intended; confirm before use.
                if (path != "___" and path in "___") or "__" not in path:
                    raise ValueError("Can't routing to this endpoint.")
            if arg.arg_in == "path":
                pp += 1
        if isinstance(method, str):
            if method == "*":
                # "*".split("|") == ["*"], so no Method value is ever skipped
                # and the handler is registered for every known method.
                fig = method.split("|")
                for met in Method.values():
                    if met in fig:
                        continue
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                # NOTE(review): returns None here, so the decorated name is
                # rebound to None for "*" registrations; confirm intended.
                return
            if "|" in method:
                # Register once per listed method.
                for met in method.split("|"):
                    loader.signals.append({
                        "method": met,
                        "func": handler,
                        "path": path,
                        "require_auth": require_auth,
                        "args": arg3,
                        "docs": docs
                    })
                # NOTE(review): also returns None (see above).
                return
        # Single-method registration; the handler itself is returned so the
        # decorated name stays callable.
        loader.signals.append({
            "method": method,
            "func": handler,
            "path": path,
            "require_auth": require_auth,
            "args": arg3,
            "docs": docs
        })
        return handler
    return _context
class Documented:
    """Mixin base for objects that may carry an attached Document."""

    def __init__(self, document: Optional[Document] = None):
        # Stored verbatim; None means "no documentation provided".
        self.docs = document
class Undefined: pass  # sentinel: marks "no default supplied" (see Argument.default)
class Argument(Documented):
    """Declarative description of one endpoint parameter.

    Knows where the value comes from (path/query/body), its declared type,
    allowed values and bounds, and can validate and optionally cast a value
    in place inside a parameter dict.
    """

    def __init__(self,
                 name: str,
                 arg_type: str,
                 arg_in: str,
                 required: bool = True,
                 auto_cast: bool = True,
                 minimum: int = -1,
                 maximum: int = -1,
                 must_be: Union[tuple, list] = (),
                 doc: Optional[Document] = None,
                 format_type: Optional[str] = None,
                 ignore_check_expect100: bool = False,
                 enum: Union[tuple, list] = (),
                 default: Optional[any] = Undefined):
        super().__init__(doc)
        if arg_type not in ["str", "string", "bool", "boolean", "number", "int", "long",
                            "double", "decimal", "float", "other"]:
            # Fix: corrected the garbled error message ("is must be").
            raise ValueError("Argument type must be a valid type.")
        if arg_in not in ["path", "query", "body"]:
            # Fix: corrected the garbled error message ("is mut be").
            raise ValueError("Argument location must be a valid type.")
        self.name = name
        self.type = arg_type
        self.arg_in = arg_in
        self.required = required
        self.auto_cast = auto_cast
        self.min = minimum
        self.max = maximum
        # Fix: the old expression compared ``enum`` against None, but its
        # default is an empty tuple (never None), so ``must_be`` was always
        # discarded.  ``enum``, when supplied, still takes precedence.
        self.must_be = enum if enum else must_be
        self.document = doc
        self.format = format_type
        self.ignore_check_expect100 = ignore_check_expect100
        # ``Undefined`` (the class itself) is the "no default" sentinel.
        self.default = default

    def norm_type(self, val: Optional[any] = None) -> Optional[any]:
        """Return the OpenAPI type name for this argument, or cast *val*
        to the matching Python type when *val* is given."""
        if "str" in self.type:
            return "string" if val is None else str(val)
        elif "bool" in self.type:
            return "bool" if val is None else bool(val)
        elif self.type == "number" or "int" in self.type:
            return "integer" if val is None else int(val)
        elif self.type == "long":
            return "integer" if val is None else int(val)
        else:
            return "number" if val is None else float(val)

    def validate(self, param_dict: dict) -> int:
        """Validate (and, with auto_cast, cast in place) this argument.

        Returns a status code:
            -1 missing required value, 0 OK, 1 not among allowed values,
            2 type error, 3 below minimum, 4 above maximum.
        """
        # NOT_FOUND = -1, OK = 0, NOT_MATCH = 1, TYPE_ERR = 2, MINIMUM_ERR = 3, MAXIMUM_ERR = 4
        name = self.name
        typ = self.type
        cast = self.auto_cast
        must_be = self.must_be
        min_val = self.min
        max_val = self.max
        if name not in param_dict:
            # Fix: the sentinel is compared by identity, not equality, so a
            # default value with a custom __eq__ cannot defeat the check.
            if self.default is not Undefined:
                if cast:
                    param_dict[name] = self.norm_type(self.default)
                else:
                    param_dict[name] = self.default
                return 0
            if self.required:
                if self.ignore_check_expect100:
                    return 0
                return -1
            else:
                return 0
        value = param_dict[name]
        if "str" in typ:
            if len(must_be) != 0 and value not in must_be:
                return 1
            if min_val != -1 and len(value) < min_val:
                return 3
            if max_val != -1 and len(value) > max_val:
                return 4
            if cast:
                param_dict[name] = str(value)
        elif "bool" in typ:
            # tuple(...) guards against must_be being supplied as a list
            # (tuple + list raises TypeError).
            if value not in ("true", "false") + tuple(self.must_be):
                return 1
            if cast:
                # Fix: bool("false") is True — compare against "true" so the
                # string "false" casts to False.  NOTE(review): any extra
                # must_be value other than "true" also casts to False.
                param_dict[name] = value == "true"
        elif typ == "other":
            return 0
        else:
            try:
                if "int" in self.type or self.type == "number":
                    val = int(value)
                else:
                    val = float(value)
            except ValueError:
                return 2
            if len(must_be) != 0 and val not in must_be:
                return 1
            if min_val != -1 and val < min_val:
                return 3
            if max_val != -1 and val > max_val:
                return 4
            if cast:
                param_dict[name] = val
        return 0
class EndPoint(Documented):
    """A registered route: method + path + handler + declared arguments.

    ``handle`` performs authentication and argument validation before
    invoking the wrapped handler function.
    """

    def __init__(self,
                 method: str,
                 route_path: str,
                 rel_path: str,
                 handler,
                 auth_required: bool = True,
                 args: Optional[list] = None,
                 path_arg: bool = False,
                 doc: Optional[Document] = None):
        super().__init__(doc)
        self.method = method
        self.route_path = route_path
        self.rel_path = rel_path
        self.handler = handler
        self.auth_required = auth_required
        self.args = () if args is None else args
        self.path_arg = path_arg

    def handle(self, handler, params: dict, queries: dict, path_param: dict) -> Union[Response, any]:
        """Authenticate, validate arguments, then call the endpoint handler.

        Returns None when authentication or validation fails (the error
        response has already been written to *handler*).
        """
        if self.auth_required and handler.do_auth():
            return
        if not self.validate_arg(handler, params, queries, path_param):
            return
        return self.handler(handler, params)

    def validate_arg(self, handler, params: dict, queries: dict, path_param: dict) -> bool:
        """Validate every declared argument against its source dict.

        Writes an error response and returns False on the first invalid
        argument; collects *all* missing required arguments before
        reporting them.  Valid query/path values are copied into *params*.
        """
        missing = []
        for arg in self.args:
            arg: Argument
            if arg.arg_in == "query":
                code = arg.validate(queries)
                if code != -1 and arg.name not in queries:
                    continue
            elif arg.arg_in == "body":
                code = arg.validate(params)
                if code != -1 and arg.name not in params:
                    continue
            elif arg.arg_in == "path":
                code = arg.validate(path_param)
                if code != -1 and arg.name not in path_param:
                    continue
            else:
                raise ValueError(f"Validate failed: N:{arg.name} - T:{arg.type} - I:{arg.arg_in}")
            if code == -1:
                missing.append(arg.name)
                continue
            elif code == 1:
                if "bool" in arg.type:
                    # tuple(...)/map(str, ...) guard against must_be being a
                    # list or containing non-string values.
                    quick_invalid(handler, arg.name, "[" + ", ".join(("true", "false") + tuple(arg.must_be)) + "]")
                    return False
                else:
                    quick_invalid(handler, arg.name, "[" + ", ".join(map(str, arg.must_be)) + "]")
                    return False
            elif code == 2:
                quick_invalid(handler, arg.name, arg.norm_type())
                return False
            elif code == 3:
                # Fix: the original tested ``"str" in arg.name`` (the
                # argument's *name*), so e.g. an int argument named
                # "streams" got the string message; test the declared type.
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"at least {arg.min} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"at least {arg.min}")
                    return False
            elif code == 4:
                # Fix: same name-vs-type bug as above.
                if "str" in arg.type:
                    quick_invalid(handler, arg.name, f"less than {arg.max} character")
                    return False
                else:
                    quick_invalid(handler, arg.name, f"less than {arg.max}")
                    return False
            if arg.arg_in == "query":
                val = arg.norm_type(queries[arg.name]) if arg.auto_cast else queries[arg.name]
                params[arg.name] = val
            elif arg.arg_in == "path":
                val = arg.norm_type(path_param[arg.name]) if arg.auto_cast else path_param[arg.name]
                params[arg.name] = val
        if len(missing) != 0:
            write(handler, 400, e(Cause.MISSING_FIELD, Cause.MISSING_FIELD[2]
                                  .replace("%0", str(len(missing)))
                                  .replace("%1", ", ".join(missing))))
            return False
        return True
class Response(Documented):
    """Mutable description of an HTTP response with a fluent builder API."""

    def __init__(self,
                 code: int = 0,
                 body: Optional[any] = None,
                 raw_body: bool = False,
                 content_type: Union[str, list] = None,
                 headers: Optional[dict] = None,
                 doc: Optional[Document] = None):
        super().__init__(doc)
        # Kept for backward compatibility even though Documented.__init__
        # already stores doc in self.docs.
        self.docs = doc
        self.code = code
        self.headers = headers if headers is not None else {}
        self.body_data = body
        self.raw = raw_body
        self.cont_type = content_type

    def header(self, name: str, value: str) -> Response:
        """Set a single header and return self (chainable)."""
        self.headers[name] = value
        return self

    def body(self, value: any, raw: bool = False) -> Response:
        """Set the body payload (and its raw flag) and return self."""
        self.body_data = value
        self.raw = raw
        return self

    def content_type(self, value: str) -> Response:
        """Set the content type attribute and header; return self."""
        self.cont_type = value
        return self.header("Content-Type", value)

    def get_code(self) -> int:
        """Return the numeric status code."""
        return self.code
class SuccessResponse(Response):
    # Marker subclass: adds no behavior; the type alone distinguishes a
    # successful response from a plain Response.
    pass
class ErrorResponse(Response):
def __init__(self,
cause: Optional[Cause] = None,
code: int = 0,
headers: Optional[dict] = None,
body: Optional[any] = None,
content_type: Optional[Union[str, list]] = None,
doc: Optional[Document] = None):
if cause is not None:
super().__init__(cause[0], headers, | |
u"ru"
for char in u"마":
self.trans[char] = u"ma"
for char in u"니":
self.trans[char] = u"ni"
for char in u"아":
self.trans[char] = u"a"
for char in u"독":
self.trans[char] = u"dok"
for char in u"일":
self.trans[char] = u"il"
for char in u"모":
self.trans[char] = u"mo"
for char in u"크":
self.trans[char] = u"keu"
for char in u"샤":
self.trans[char] = u"sya"
for char in u"영":
self.trans[char] = u"yeong"
for char in u"불":
self.trans[char] = u"bul"
for char in u"가":
self.trans[char] = u"ga"
for char in u"리":
self.trans[char] = u"ri"
for char in u"그":
self.trans[char] = u"geu"
for char in u"지":
self.trans[char] = u"ji"
for char in u"야":
self.trans[char] = u"ya"
for char in u"바":
self.trans[char] = u"ba"
for char in u"슈":
self.trans[char] = u"syu"
for char in u"키":
self.trans[char] = u"ki"
for char in u"프":
self.trans[char] = u"peu"
for char in u"랑":
self.trans[char] = u"rang"
for char in u"스":
self.trans[char] = u"seu"
for char in u"로":
self.trans[char] = u"ro"
for char in u"메":
self.trans[char] = u"me"
for char in u"역":
self.trans[char] = u"yeok"
for char in u"도":
self.trans[char] = u"do"
# Kannada
self.trans[u"ಅ"] = u"a"
for char in u"ಆಾ":
self.trans[char] = u"aa"
for char in u"ಇಿ":
self.trans[char] = u"i"
for char in u"ಈೀ":
self.trans[char] = u"ii"
for char in u"ಉು":
self.trans[char] = u"u"
for char in u"ಊೂ":
self.trans[char] = u"uu"
for char in u"ಋೂ":
self.trans[char] = u"r'"
for char in u"ಎೆ":
self.trans[char] = u"e"
for char in u"ಏೇ":
self.trans[char] = u"ee"
for char in u"ಐೈ":
self.trans[char] = u"ai"
for char in u"ಒೊ":
self.trans[char] = u"o"
for char in u"ಓೋ":
self.trans[char] = u"oo"
for char in u"ಔೌ":
self.trans[char] = u"au"
self.trans[u"ಂ"] = u"m'"
self.trans[u"ಃ"] = u"h'"
self.trans[u"ಕ"] = u"k"
self.trans[u"ಖ"] = u"kh"
self.trans[u"ಗ"] = u"g"
self.trans[u"ಘ"] = u"gh"
self.trans[u"ಙ"] = u"ng"
self.trans[u"ಚ"] = u"c"
self.trans[u"ಛ"] = u"ch"
self.trans[u"ಜ"] = u"j"
self.trans[u"ಝ"] = u"ny"
self.trans[u"ಟ"] = u"tt"
self.trans[u"ಠ"] = u"tth"
self.trans[u"ಡ"] = u"dd"
self.trans[u"ಢ"] = u"ddh"
self.trans[u"ಣ"] = u"nn"
self.trans[u"ತ"] = u"t"
self.trans[u"ಥ"] = u"th"
self.trans[u"ದ"] = u"d"
self.trans[u"ಧ"] = u"dh"
self.trans[u"ನ"] = u"n"
self.trans[u"ಪ"] = u"p"
self.trans[u"ಫ"] = u"ph"
self.trans[u"ಬ"] = u"b"
self.trans[u"ಭ"] = u"bh"
self.trans[u"ಮ"] = u"m"
self.trans[u"ಯ"] = u"y"
self.trans[u"ರ"] = u"r"
self.trans[u"ಲ"] = u"l"
self.trans[u"ವ"] = u"v"
self.trans[u"ಶ"] = u"sh"
self.trans[u"ಷ"] = u"ss"
self.trans[u"ಸ"] = u"s"
self.trans[u"ಹ"] = u"h"
self.trans[u"ಳ"] = u"ll"
self.trans[u"೦"] = u"0"
self.trans[u"೧"] = u"1"
self.trans[u"೨"] = u"2"
self.trans[u"೩"] = u"3"
self.trans[u"೪"] = u"4"
self.trans[u"೫"] = u"5"
self.trans[u"೬"] = u"6"
self.trans[u"೭"] = u"7"
self.trans[u"೮"] = u"8"
self.trans[u"೯"] = u"9"
# Telugu
for char in u"అ":
self.trans[char] = u"a"
for char in u"ఆా":
self.trans[char] = u"aa"
for char in u"ఇి":
self.trans[char] = u"i"
for char in u"ఈీ":
self.trans[char] = u"ii"
for char in u"ఉు":
self.trans[char] = u"u"
for char in u"ఊూ":
self.trans[char] = u"uu"
for char in u"ఋృ":
self.trans[char] = u"r'"
for char in u"ౠౄ":
self.trans[char] = u'r"'
self.trans[u"ఌ"] = u"l'"
self.trans[u"ౡ"] = u'l"'
for char in u"ఎె":
self.trans[char] = u"e"
for char in u"ఏే":
self.trans[char] = u"ee"
for char in u"ఐై":
self.trans[char] = u"ai"
for char in u"ఒొ":
self.trans[char] = u"o"
for char in u"ఓో":
self.trans[char] = u"oo"
for char in u"ఔౌ":
self.trans[char] = u"au"
self.trans[u"ం"] = u"'"
self.trans[u"ః"] = u'"'
self.trans[u"క"] = u"k"
self.trans[u"ఖ"] = u"kh"
self.trans[u"గ"] = u"g"
self.trans[u"ఘ"] = u"gh"
self.trans[u"ఙ"] = u"ng"
self.trans[u"చ"] = u"ts"
self.trans[u"ఛ"] = u"tsh"
self.trans[u"జ"] = u"j"
self.trans[u"ఝ"] = u"jh"
self.trans[u"ఞ"] = u"ñ"
for char in u"టత":
self.trans[char] = u"t"
for char in u"ఠథ":
self.trans[char] = u"th"
for char in u"డద":
self.trans[char] = u"d"
for char in u"ఢధ":
self.trans[char] = u"dh"
for char in u"ణన":
self.trans[char] = u"n"
self.trans[u"ప"] = u"p"
self.trans[u"ఫ"] = u"ph"
self.trans[u"బ"] = u"b"
self.trans[u"భ"] = u"bh"
self.trans[u"మ"] = u"m"
self.trans[u"య"] = u"y"
for char in u"రఱ":
self.trans[char] = u"r"
for char in u"లళ":
self.trans[char] = u"l"
self.trans[u"వ"] = u"v"
self.trans[u"శ"] = u"sh"
for char in u"షస":
self.trans[char] = u"s"
self.trans[u"హ"] = u"h"
self.trans[u"్"] = ""
for char in u"ంఁ":
self.trans[char] = u"^"
self.trans[u"ః"] = u"-"
self.trans[u"౦"] = u"0"
self.trans[u"౧"] = u"1"
self.trans[u"౨"] = u"2"
self.trans[u"౩"] = u"3"
self.trans[u"౪"] = u"4"
self.trans[u"౫"] = u"5"
self.trans[u"౬"] = u"6"
self.trans[u"౭"] = u"7"
self.trans[u"౮"] = u"8"
self.trans[u"౯"] = u"9"
self.trans[u"౹"] = u"1/4"
self.trans[u"౺"] = u"1/2"
self.trans[u"౻"] = u"3/4"
self.trans[u"౼"] = u"1/16"
self.trans[u"౽"] = u"1/8"
self.trans[u"౾"] = u"3/16"
# Lao - note: pronunciation in initial position is used;
# different pronunciation in final position is ignored
self.trans[u"ກ"] = "k"
for char in u"ຂຄ":
self.trans[char] = "kh"
self.trans[u"ງ"] = "ng"
self.trans[u"ຈ"] = "ch"
for char in u"ສຊ":
self.trans[char] = "s"
self.trans[u"ຍ"] = "ny"
self.trans[u"ດ"] = "d"
self.trans[u"ຕ"] = "t"
for char in u"ຖທ":
self.trans[char] = "th"
self.trans[u"ນ"] = "n"
self.trans[u"ບ"] = "b"
self.trans[u"ປ"] = "p"
for char in u"ຜພ":
self.trans[char] = "ph"
for char in u"ຝຟ":
self.trans[char] = "f"
for char in u"ມໝ":
self.trans[char] = "m"
self.trans[u"ຢ"] = "y"
for char in u"ຣຼ":
self.trans[char] = "r"
for char in u"ລຼ":
self.trans[char] = "l"
self.trans[u"ວ"] = "v"
for char in u"ຮ":
self.trans[char] = "h"
self.trans[u"ອ"] = "'"
for char in u"ະັ":
self.trans[char] = "a"
self.trans[u"ິ"] = "i"
self.trans[u"ຶ"] = "ue"
self.trans[u"ຸ"] = "u"
self.trans[u"ເ"] = u"é"
self.trans[u"ແ"] = u"è"
for char in u"ໂົາໍ":
self.trans[char] = "o"
self.trans[u"ຽ"] = "ia"
self.trans[u"ເຶ"] = "uea"
self.trans[u"ຍ"] = "i"
for char in u"ໄໃ":
self.trans[char] = "ai"
self.trans[u"ຳ"] = "am"
self.trans[u"າ"] = "aa"
self.trans[u"ີ"] = "ii"
self.trans[u"ື"] = "yy"
self.trans[u"ູ"] = "uu"
self.trans[u"ເ"] = "e"
self.trans[u"ແ"] = "ei"
self.trans[u"໐"] = "0"
self.trans[u"໑"] = "1"
self.trans[u"໒"] = "2"
self.trans[u"໓"] = "3"
self.trans[u"໔"] = "4"
self.trans[u"໕"] = "5"
self.trans[u"໖"] = "6"
self.trans[u"໗"] = "7"
self.trans[u"໘"] = "8"
self.trans[u"໙"] = "9"
# Chinese -- note: incomplete
for char in u"埃挨哎唉哀皑癌蔼矮艾碍爱隘":
self.trans[char] = u"ai"
for char in u"鞍氨安俺按暗岸胺案":
self.trans[char] = u"an"
for char in u"肮昂盎":
self.trans[char] = u"ang"
for char in u"凹敖熬翱袄傲奥懊澳":
self.trans[char] = u"ao"
for char in u"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸":
self.trans[char] = u"ba"
for char in u"白柏百摆佰败拜稗":
self.trans[char] = u"bai"
for char in u"斑班搬扳般颁板版扮拌伴瓣半办绊":
self.trans[char] = u"ban"
for char in u"邦帮梆榜膀绑棒磅蚌镑傍谤":
self.trans[char] = u"bang"
for char in u"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆":
self.trans[char] = u"bao"
for char in u"杯碑悲卑北辈背贝钡倍狈备惫焙被":
self.trans[char] = u"bei"
for char in u"奔苯本笨":
self.trans[char] = u"ben"
for char in u"崩绷甭泵蹦迸":
self.trans[char] = u"beng"
for char in u"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛":
self.trans[char] = u"bi"
for char in u"鞭边编贬扁便变卞辨辩辫遍":
self.trans[char] = u"bian"
for char in u"标彪膘表":
self.trans[char] = u"biao"
for char in u"鳖憋别瘪":
self.trans[char] = u"bie"
for char in u"彬斌濒滨宾摈":
self.trans[char] = u"bin"
for char in u"兵冰柄丙秉饼炳病并":
self.trans[char] = u"bing"
for char in u"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳":
self.trans[char] = u"bo"
for char in u"哺补埠不布步簿部怖":
self.trans[char] = u"bu"
for char in u"猜裁材才财睬踩采彩菜蔡":
self.trans[char] = u"cai"
for char in u"餐参蚕残惭惨灿":
self.trans[char] = u"can"
for char in u"苍舱仓沧藏":
self.trans[char] = u"cang"
for char in u"操糙槽曹草":
self.trans[char] = u"cao"
for char in u"厕策侧册测":
self.trans[char] = u"ce"
for char in u"层蹭":
self.trans[char] = u"ceng"
for char in u"插叉茬茶查碴搽察岔差诧":
self.trans[char] = u"cha"
for char in u"拆柴豺":
self.trans[char] = u"chai"
for char in u"搀掺蝉馋谗缠铲产阐颤":
self.trans[char] = u"chan"
for char in u"昌猖场尝常长偿肠厂敞畅唱倡":
self.trans[char] = u"chang"
for char in u"超抄钞朝嘲潮巢吵炒":
self.trans[char] = u"chao"
for char in u"车扯撤掣彻澈":
self.trans[char] = u"che"
for char in u"郴臣辰尘晨忱沉陈趁衬":
self.trans[char] = u"chen"
for char in u"撑称城橙成呈乘程惩澄诚承逞骋秤":
self.trans[char] = u"cheng"
for char in u"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽":
self.trans[char] = u"chi"
for char in u"充冲虫崇宠":
self.trans[char] = u"chong"
for char in u"抽酬畴踌稠愁筹仇绸瞅丑臭":
self.trans[char] = u"chou"
for char in u"初出橱厨躇锄雏滁除楚储矗搐触处":
self.trans[char] = u"chu"
for char in u"揣":
self.trans[char] = u"chuai"
for char in u"川穿椽传船喘串":
self.trans[char] = u"chuan"
for char in u"疮窗幢床闯创":
self.trans[char] = u"chuang"
for char in u"吹炊捶锤垂":
self.trans[char] = u"chui"
for char in u"春椿醇唇淳纯蠢":
self.trans[char] = u"chun"
for char in u"戳绰":
self.trans[char] = u"chuo"
for char in u"疵茨磁雌辞慈瓷词此刺赐次":
self.trans[char] = u"ci"
for char in u"聪葱囱匆从丛":
self.trans[char] = u"cong"
for char in u"凑":
self.trans[char] = u"cou"
for char in u"粗醋簇促":
self.trans[char] = u"cu"
for char in u"蹿篡窜":
self.trans[char] = u"cuan"
for char in u"摧崔催脆瘁粹淬翠":
self.trans[char] = u"cui"
for char in u"村存寸":
self.trans[char] = u"cun"
for char in u"磋撮搓措挫错":
self.trans[char] = u"cuo"
for char in u"搭达答瘩打大":
self.trans[char] = u"da"
for char in u"呆歹傣戴带殆代贷袋待逮怠":
self.trans[char] | |
based on model data
class ModelFilterParser(HasAModelManager):
"""
Converts string tuples (partially converted query string params) of
attr, op, val into either:
- ORM based filters (filters that can be applied by the ORM at the SQL
level) or
- functional filters (filters that use derived values or values not
within the SQL tables)
These filters can then be applied to queries.
This abstraction allows 'smarter' application of limit and offset at either the
SQL level or the generator/list level based on the presence of functional
filters. In other words, if no functional filters are present, limit and offset
may be applied at the SQL level. If functional filters are present, limit and
offset need to be applied at the list level.
These might safely be replaced in the future by creating SQLAlchemy
hybrid properties or more thoroughly mapping derived values.
"""
# ??: this class kind of 'lives' in both the world of the controllers/param-parsing and the models/orm
# (as the model informs how the filter params are parsed)
# I have no great idea where this 'belongs', so it's here for now
model_class: Type[model._HasTable]
parsed_filter = parsed_filter
orm_filter_parsers: OrmFilterParsersType
fn_filter_parsers: FunctionFilterParsersType
def __init__(self, app: MinimalManagerApp, **kwargs):
"""
Set up serializer map, any additional serializable keys, and views here.
"""
super().__init__(app, **kwargs)
#: regex for testing/dicing iso8601 date strings, with optional time and ms, but allowing only UTC timezone
self.date_string_re = re.compile(
r"^(\d{4}\-\d{2}\-\d{2})[T| ]{0,1}(\d{2}:\d{2}:\d{2}(?:\.\d{1,6}){0,1}){0,1}Z{0,1}$"
)
# dictionary containing parsing data for ORM/SQLAlchemy-based filters
# ..note: although kind of a pain in the ass and verbose, opt-in/allowlisting allows more control
# over potentially expensive queries
self.orm_filter_parsers = {}
#: dictionary containing parsing data for functional filters - applied after a query is made
self.fn_filter_parsers = {}
# set up both of the above
self._add_parsers()
def _add_parsers(self):
"""
Set up, extend, or alter `orm_filter_parsers` and `fn_filter_parsers`.
"""
# note: these are the default filters for all models
self.orm_filter_parsers.update(
{
# (prob.) applicable to all models
"id": {"op": ("in")},
"encoded_id": {"column": "id", "op": ("in"), "val": self.parse_id_list},
# dates can be directly passed through the orm into a filter (no need to parse into datetime object)
"extension": {"op": ("eq", "like", "in")},
"create_time": {"op": ("le", "ge", "lt", "gt"), "val": self.parse_date},
"update_time": {"op": ("le", "ge", "lt", "gt"), "val": self.parse_date},
}
)
def build_filter_params(
self,
query_params: FilterQueryParams,
filter_attr_key: str = "q",
filter_value_key: str = "qv",
attr_op_split_char: str = "-",
) -> List[Tuple[str, str, str]]:
"""
Builds a list of tuples containing filtering information in the form of (attribute, operator, value).
"""
DEFAULT_OP = "eq"
qdict = query_params.dict(exclude_defaults=True)
if filter_attr_key not in qdict:
return []
# precondition: attrs/value pairs are in-order in the qstring
attrs = qdict.get(filter_attr_key)
if not isinstance(attrs, list):
attrs = [attrs]
# ops are strings placed after the attr strings and separated by a split char (e.g. 'create_time-lt')
# ops are optional and default to 'eq'
reparsed_attrs = []
ops = []
for attr in attrs:
op = DEFAULT_OP
if attr_op_split_char in attr:
# note: only split the last (e.g. q=community-tags-in&qv=rna yields ( 'community-tags', 'in', 'rna' )
attr, op = attr.rsplit(attr_op_split_char, 1)
ops.append(op)
reparsed_attrs.append(attr)
attrs = reparsed_attrs
values = qdict.get(filter_value_key, [])
if not isinstance(values, list):
values = [values]
# TODO: it may be more helpful to the consumer if we error on incomplete 3-tuples
# (instead of relying on zip to shorten)
return list(zip(attrs, ops, values))
def parse_query_filters(self, query_filters: FilterQueryParams):
"""Convenience function to parse a FilterQueryParams object into a collection of filtering criteria."""
filter_params = self.build_filter_params(query_filters)
return self.parse_filters(filter_params)
def parse_filters(self, filter_tuple_list):
"""
Parse string 3-tuples (attr, op, val) into orm or functional filters.
"""
# TODO: allow defining the default filter op in this class (and not 'eq' in base/controller.py)
parsed = []
for (attr, op, val) in filter_tuple_list:
filter_ = self.parse_filter(attr, op, val)
parsed.append(filter_)
return parsed
def parse_filter(self, attr, op, val):
"""
Attempt to parse filter as a custom/fn filter, then an orm filter, and
if neither work - raise an error.
:raises exceptions.RequestParameterInvalidException: if no functional or orm
filter can be parsed.
"""
try:
# check for a custom filter
fn_filter = self._parse_fn_filter(attr, op, val)
if fn_filter is not None:
return fn_filter
# if no custom filter found, try to make an ORM filter
# note: have to use explicit is None here, bool( sqlalx.filter ) == False
orm_filter = self._parse_orm_filter(attr, op, val)
if orm_filter is not None:
return orm_filter
# by convention, assume most val parsers raise ValueError
except ValueError as val_err:
raise exceptions.RequestParameterInvalidException(
"unparsable value for filter", column=attr, operation=op, value=val, ValueError=str(val_err)
)
# if neither of the above work, raise an error with how-to info
# TODO: send back all valid filter keys in exception for added user help
raise exceptions.RequestParameterInvalidException("bad filter", column=attr, operation=op)
# ---- fn filters
def _parse_fn_filter(self, attr, op, val):
"""
Attempt to parse a non-ORM filter function.
"""
# fn_filter_list is a dict: fn_filter_list[ attr ] = { 'opname1' : opfn1, 'opname2' : opfn2, etc. }
# attr, op is a nested dictionary pointing to the filter fn
attr_map = self.fn_filter_parsers.get(attr, None)
if not attr_map:
return None
allowed_ops = attr_map["op"]
# allowed ops is a map here, op => fn
filter_fn = allowed_ops.get(op, None)
if not filter_fn:
return None
# parse the val from string using the 'val' parser if present (otherwise, leave as string)
val_parser = attr_map.get("val", None)
if val_parser:
val = val_parser(val)
# curry/partial and fold the val in there now
return self.parsed_filter(filter_type="function", filter=lambda i: filter_fn(i, val))
# ---- ORM filters
def _parse_orm_filter(self, attr, op, val):
"""
Attempt to parse a ORM-based filter.
Using SQLAlchemy, this would yield a sql.elements.BinaryExpression.
"""
# orm_filter_list is a dict: orm_filter_list[ attr ] = <list of allowed ops>
column_map = self.orm_filter_parsers.get(attr, None)
if not column_map:
# no column mapping (not allowlisted)
return None
if callable(column_map):
return self.parsed_filter(filter_type="orm_function", filter=column_map(attr, op, val))
# attr must be an allowlisted column by attr name or by key passed in column_map
# note: column_map[ 'column' ] takes precedence
if "column" in column_map:
attr = column_map["column"]
column = self.model_class.table.columns.get(attr)
if column is None:
# could be a property (hybrid_property, etc.) - assume we can make a filter from it
column = getattr(self.model_class, attr)
if column is None:
# no orm column
return None
# op must be allowlisted: contained in the list orm_filter_list[ attr ][ 'op' ]
allowed_ops = column_map["op"]
if op not in allowed_ops:
return None
op = self._convert_op_string_to_fn(column, op)
if not op:
return None
# parse the val from string using the 'val' parser if present (otherwise, leave as string)
val_parser = column_map.get("val", None)
if val_parser:
val = val_parser(val)
orm_filter = op(val)
return self.parsed_filter(filter_type="orm", filter=orm_filter)
#: these are the easier/shorter string equivalents to the python operator fn names that need '__' around them
UNDERSCORED_OPS = ("lt", "le", "eq", "ne", "ge", "gt")
def _convert_op_string_to_fn(self, column, op_string):
"""
Convert the query string filter op shorthand into actual ORM usable
function names, then return the ORM function.
"""
# correct op_string to usable function key
fn_name = op_string
if op_string in self.UNDERSCORED_OPS:
fn_name = f"__{op_string}__"
elif op_string == "in":
fn_name = "in_"
# get the column fn using the op_string and error if not a callable attr
# TODO: special case 'not in' - or disallow?
op_fn = getattr(column, fn_name, None)
if not op_fn or not callable(op_fn):
return None
return op_fn
# ---- preset fn_filters: dictionaries of standard filter ops for standard datatypes
def string_standard_ops(self, key):
return {
"op": {
"eq": lambda i, v: v == getattr(i, key),
"contains": lambda i, v: v in getattr(i, key),
}
}
# --- more parsers! yay!
# TODO: These should go somewhere central - we've got ~6 parser modules/sections now
def parse_id_list(self, id_list_string, sep=","):
"""
Split `id_list_string` at `sep`.
"""
# TODO: move id decoding out
id_list = [self.app.security.decode_id(id_) for id_ in id_list_string.split(sep)]
return id_list
def parse_int_list(self, int_list_string, sep=","):
"""
Split | |
:returns: role info
:rtype: :class:`~c4.system.configuration.RoleInfo`
"""
# check if role exists
roleName = role.role.name
roleInfo = self.getRoleInfo(role.role)
if roleInfo is None:
roleKey = "{base}/{role}".format(base=self.ROLES, role=roleName)
self.client.put(roleKey, serialize(role))
else:
# we are already exist
self.log.error("'%s' is already defined", role)
return None
return role
    def clear(self):
        """
        Removes all nodes and devices from the configuration object and the database.

        Note: deletes everything under the root prefix "/", so aliases, roles
        and platform information stored by this client are removed as well.
        """
        self.client.delete_prefix("/")
def changeAlias(self, alias, node):
"""
Change the node an alias refers to
:param alias: alias
:type alias: str
:param node: node
:type node: str
:returns: alias
:rtype: str
"""
nodeKey = self.getKey(node, None)
nodeName, _ = self.client.get(nodeKey)
if not nodeName:
self.log.error("could not change alias '%s' because node '%s' does not exist", alias, node)
return None
aliasKey = "/aliases/{alias}".format(alias=alias)
# check if the alias exists
compare = [
etcd3.transactions.Version(aliasKey) > 0
]
# set new value
success = [
etcd3.transactions.Put(aliasKey, node)
]
succeeded, _ = self.client.transaction(
compare=compare,
success=success,
failure=[]
)
if not succeeded:
self.log.error("alias '%s' does not exist", alias)
return None
return alias
    def changeProperty(self, node, name, propertyName, value, setIfNotExist=False):
        """
        Change a property of a system or device manager to the specified value

        :param node: node
        :type node: str
        :param name: device manager name
        :type name: str
        :param propertyName: property name
        :type propertyName: str
        :param value: property value
        :type value: str
        :param setIfNotExist: create the property when it does not yet exist
            (instead of logging an error and leaving it unset)
        :type setIfNotExist: bool
        :returns: previous value, or ``None`` if there was none
        """
        # the node/device itself has to exist before touching its properties
        key = self.getKey(node, name)
        keyName, _ = self.client.get(key)
        if not keyName:
            return None
        propertyKey = self.getKey(node, name, "properties", propertyName)
        serializedValue = serialize(value)
        # check if the key exists
        compare = [
            etcd3.transactions.Version(propertyKey) > 0
        ]
        # get previous value and set new value
        success = [
            etcd3.transactions.Get(propertyKey),
            etcd3.transactions.Put(propertyKey, serializedValue)
        ]
        # just set the new value
        failure = [
            etcd3.transactions.Put(propertyKey, serializedValue)
        ]
        if setIfNotExist:
            succeeded, responses = self.client.transaction(
                compare=compare,
                success=success,
                failure=failure
            )
            if not succeeded:
                # just set a value that did not exist so no previous value
                return None
        else:
            succeeded, responses = self.client.transaction(
                compare=compare,
                success=success,
                failure=[]
            )
            if not succeeded:
                self.log.error("property '%s' of '%s/%s' does not exist", propertyName, node, name)
                return None
        # get previous value from the Get response of the transaction
        previousValue, _ = responses[0][0]
        return deserialize(previousValue)
    def changeRole(self, node, role):
        """
        Change role of a system manager

        :param node: node
        :type node: str
        :param role: role
        :type role: :class:`Roles`
        :returns: previous role, or ``None`` if the key had to be created
        :rtype: :class:`Roles`
        """
        # reject anything that is not a proper Roles enum member
        if not isinstance(role, Roles):
            self.log.error("'%s' does not match enum of type '%s'", role, Roles)
            return
        key = self.getKey(node, None, "role")
        serializedValue = serialize(role)
        # check if the key exists
        compare = [
            etcd3.transactions.Version(key) > 0
        ]
        # get previous value and set new value
        success = [
            etcd3.transactions.Get(key),
            etcd3.transactions.Put(key, serializedValue)
        ]
        # just set the new value (upsert when the key does not exist yet)
        failure = [
            etcd3.transactions.Put(key, serializedValue)
        ]
        succeeded, responses = self.client.transaction(
            compare=compare,
            success=success,
            failure=failure
        )
        if not succeeded:
            # just set a value that did not exist so no previous value
            return None
        # get previous value from the Get response of the transaction
        previousValue, _ = responses[0][0]
        return deserialize(previousValue)
def changeRoleInfo(self, role, info):
"""
Change the role information for a given role
:param role: role
:type role: :class:`Roles`
:param info: roleInfo
:type info: :class:`~c4.system.configuration.RoleInfo`
:returns: role info
:rtype: :class:`~c4.system.configuration.RoleInfo`
"""
key = "{base}/{role}".format(base=self.ROLES, role=role.name)
serializedValue = serialize(info)
# check if the key exists
compare = [
etcd3.transactions.Version(key) > 0
]
# get previous value and set new value
success = [
etcd3.transactions.Get(key),
etcd3.transactions.Put(key, serializedValue)
]
# just set the new value
failure = [
etcd3.transactions.Put(key, serializedValue)
]
succeeded, responses = self.client.transaction(
compare=compare,
success=success,
failure=failure
)
if not succeeded:
# just set a value that did not exist so no previous value
return None
# get previous value from the the Get response of the transaction
previousValue, _ = responses[0][0]
return deserialize(previousValue)
    def changeState(self, node, name, state):
        """
        Change state of a system or device manager

        :param node: node
        :type node: str
        :param name: device manager name (``None``/empty to address the node itself)
        :type name: str
        :param state: state
        :type state: :class:`States`
        :returns: previous state, or ``None`` on error
        :rtype: :class:`States`
        """
        # reject anything that is not a proper States enum member
        if not isinstance(state, States):
            self.log.error("'%s' does not match enum of type '%s'", state, States)
            return None
        stateKey = self.getKey(node, name, "state")
        serializedState = serialize(state)
        # check if the state stateKey exists
        compare = [
            etcd3.transactions.Version(stateKey) > 0
        ]
        # get previous state and set new state
        success = [
            etcd3.transactions.Get(stateKey),
            etcd3.transactions.Put(stateKey, serializedState)
        ]
        succeeded, responses = self.client.transaction(
            compare=compare,
            success=success,
            failure=[]
        )
        if not succeeded:
            self.log.error("could not change state of '%s%s' to '%s' because it does not exist", node, "/" + name if name else "", state)
            return None
        # get previous state from the Get response of the transaction
        previousValue, _ = responses[0][0]
        # special case: setting REGISTERED or MAINTENANCE on a node cascades
        # the state to all of its device state keys, except devices already in
        # MAINTENANCE, which keep their state
        if not name and (state == States.REGISTERED or state == States.MAINTENANCE):
            transaction = EtcdTransaction(self.client)
            devicesPrefix = self.getKey(node, None, "devices", "")
            for value, metadata in self.client.get_prefix(devicesPrefix):
                if metadata.key.endswith("/state") and deserialize(value) != States.MAINTENANCE:
                    transaction.put(metadata.key, serializedState)
            transaction.commit()
        return deserialize(previousValue)
def getAliases(self):
"""
Get a mapping of aliases to node names
:returns: mappings
:rtype: dict
"""
aliasesPrefix = "/aliases/"
# note that key is the alias and value is the node name
return {
metadata.key.replace(aliasesPrefix, ""): value
for value, metadata in self.client.get_prefix(aliasesPrefix)
}
def getKey(self, node, name, *additionalParts):
"""
Assemble an etcd key based on node, device and property names
:param node: node
:type node: str
:param name: device manager name
:type name: str
:returns: key
:rtype: str
"""
keyParts = ["/nodes", node]
if name:
for namePart in name.split("."):
keyParts.append("devices")
keyParts.append(namePart)
keyParts.extend(additionalParts)
return "/".join(keyParts)
def getPlatform(self):
"""
Get platform information
:returns: platform
:rtype: :class:`~c4.system.configuration.PlatformInfo`
"""
platformName, _ = self.client.get(self.PLATFORM)
platformPrefix = "{platformKey}/".format(platformKey=self.PLATFORM)
# map from key to value and deserialize value automatically
platform = {
metadata.key : deserialize(value)
for value, metadata in self.client.get_prefix(platformPrefix)
}
# filter out settings
platformSettingsPrefix = "{settingsKey}/".format(settingsKey=self.PLATFORM_SETTINGS)
settings = {
key.replace(platformSettingsPrefix, ""): value
for key, value in platform.items()
if key.startswith(platformSettingsPrefix)
}
return PlatformInfo(
name=platformName or "unknown",
platformType=platform.get(self.PLATFORM_TYPE, "c4.system.platforms.Unknown"),
description=platform.get(self.PLATFORM_DESCRIPTION, ""),
settings=settings
)
def getProperty(self, node, name, propertyName, default=None):
"""
Get the property of a system or device manager.
:param node: node
:type node: str
:param name: device manager name
:type name: str
:param propertyName: property name
:type propertyName: str
:param default: default value to return if property does not exist
:returns: property value
"""
propertyKey = self.getKey(node, name, "properties", propertyName)
value, _ = self.client.get(propertyKey)
if value is None:
return default
return deserialize(value)
def getProperties(self, node, name=None):
"""
Get the properties of a system or device manager.
:param node: node
:type node: str
:param name: device manager name
:type name: str
:returns: properties or ``None`` if node or device does not exist
:rtype: dict
"""
key = self.getKey(node, name)
value, _ = self.client.get(key)
if value is None:
self.log.error("could not get property because '%s%s' does not exist", node, "/" + name if name else "")
return None
propertiesPrefix = self.getKey(node, name, "properties/")
# map from key to value and deserialize value automatically
properties = {
metadata.key.replace(propertiesPrefix, "") : deserialize(value)
for value, metadata in self.client.get_prefix(propertiesPrefix)
}
return properties
def getRole(self, node):
"""
Get the role of a system manager.
:param node: node
:type node: str
:returns: role
:rtype: :class:`Roles`
"""
roleKey = self.getKey(node, None, "role")
value, _ = self.client.get(roleKey)
if value is None:
self.log.error("could not get role because '%s' does not exist", node)
return None
return deserialize(value)
def getRoleInfo(self, role):
"""
Get role information for the specified role
:param role: role
:type role: :class:`Roles`
:returns: role info
:rtype: :class:`~c4.system.configuration.RoleInfo`
"""
key = "{base}/{role}".format(base=self.ROLES, role=role.name)
value, _ = self.client.get(key)
if value is None:
return None
return deserialize(value)
def getRoles(self):
"""
Get a mapping of roles to role info objects
:returns: mappings
:rtype: dict
"""
rolesPrefix = self.ROLES + "/"
# note that key is the role name and value is the role info
return {
metadata.key.replace(rolesPrefix, ""): deserialize(value)
for value, metadata in self.client.get_prefix(rolesPrefix)
}
def getState(self, node, | |
<gh_stars>1-10
"""
Provides the following multilateral methods:
* :func:`time_dummy`
* :func:`geary_khamis`
* :func:`geks`
paired with
* :func:`carli`
* :func:`jevons`
* :func:`dutot`
* :func:`laspeyres`
* :func:`paasche`
* :func:`geom_laspeyres`
* :func:`geom_paasche`
* :func:`drobish`
* :func:`marshall_edgeworth`
* :func:`palgrave`
* :func:`fisher`
* :func:`tornqvist`
* :func:`walsh`
* :func:`sato_vartia`
* :func:`geary_khamis_b`
* :func:`tpd`
* :func:`rothwell`
The TDH/TPD methods are model-based multilateral index number methods
which have been proposed to incorporate scanner data. They are part of
many multilateral methods motivated by an attempt to minimize the risk
of chain drift, particularly within a window, while maximizing the
number of matches in the data.
TDH index is used when information on item characteristics are
available, and the TPD index when this information is lacking. The
TDH produces an explicit hedonic price index, while the TPD produces
an implicit hedonic price index, which are both estimated on the
pooled data of one or more periods via an application of expenditure
shares weighted least squares regression.
"""
from typing import List, Sequence, Optional
from itertools import combinations
import pandas as pd
import numpy as np
from scipy.stats.mstats import gmean
from .bilateral import *
from .helpers import diag, _weights_calc
from .weighted_least_squares import wls
__author__ = ['Dr. <NAME>']
def geks(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str = 'month',
    product_id_col: str = 'id',
    bilateral_method: str = 'tornqvist',
) -> pd.Series:
    """Obtain the GEKS indices paired with a bilateral method for a given dataframe.

    Calculate the index values using a for loop to determine the matrix of
    bilaterals, where we exploit the symmetry condition a_{i j} = 1/a_{j i} and
    a_{i i} = 1 to save computation time, followed by a geometric mean.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe containing the data.
    price_col : str, optional
        Name of the column containing the price information.
    quantity_col : str, optional
        Name of the column containing the quantity information.
    date_col : str, optional
        Name of the column containing the date information.
    product_id_col : str, optional
        Name of the column containing the product id information.
    bilateral_method : str, optional
        Name of the bilateral index formula to pair with GEKS (e.g.
        'tornqvist', 'fisher', 'laspeyres'), or 'tpd' to use the
        two-period Time Product Dummy method instead of a bilateral.

    Returns
    -------
    pd.Series
        The GEKS indices, normalized to the first period and indexed by the
        unique periods of ``date_col``.
    """
    # Get unique periods and length of time series.
    periods = df[date_col].unique()
    no_of_periods = len(periods)
    if bilateral_method != 'tpd':
        # Resolve the bilateral function by name (imported via
        # ``from .bilateral import *``).
        bilateral_func = globals()[bilateral_method]
    # Initialize matrix for bilateral pairs; only the upper triangle is
    # filled directly, the rest follows from symmetry below.
    pindices = np.zeros((no_of_periods, no_of_periods))
    for i, j in combinations(range(no_of_periods), 2):
        # Slice df for the base period (i) and current period (j).
        df_base = df.loc[df[date_col] == periods[i]]
        df_curr = df.loc[df[date_col] == periods[j]]
        # Make sure the sample is matched for the given periods.
        df_base = df_base[df_base[product_id_col].isin(df_curr[product_id_col])]
        df_curr = df_curr[df_curr[product_id_col].isin(df_base[product_id_col])]
        if bilateral_method == 'tpd':
            # Use the multilateral TPD method with two periods.
            df_matched = (
                pd.concat([df_base, df_curr])
                .drop_duplicates()
                .drop(columns='weights')
            )
            # Recalculate weights for the matched df.
            df_matched = _weights_calc(df_matched)
            # Append values to the upper triangle of the matrix.
            pindices[i, j] = time_dummy(df_matched)[-1]
        else:
            # Price vectors of base period and current period.
            p_base = df_base[price_col].to_numpy()
            p_curr = df_curr[price_col].to_numpy()
            data = (p_base, p_curr)
            # Base-period quantities, for methods that use them.
            if bilateral_method in {
                'laspeyres', 'drobish', 'marshall_edgeworth',
                'geom_laspeyres', 'tornqvist', 'fisher',
                'walsh', 'sato_vartia', 'geary_khamis_b',
                'rothwell', 'lowe'
            }:
                q_base = df_base[quantity_col].to_numpy()
                data += (q_base, )
            # Current-period quantities, for methods that use them.
            if bilateral_method in {
                'paasche', 'drobish', 'palgrave',
                'marshall_edgeworth', 'geom_paasche', 'tornqvist',
                'fisher', 'walsh', 'sato_vartia',
                'geary_khamis_b'
            }:
                q_curr = df_curr[quantity_col].to_numpy()
                data += (q_curr, )
            # Bilateral for this (base, current) pair -> upper triangle.
            pindices[i, j] = bilateral_func(*data)
    # Exploit symmetry: a_{j i} = 1 / a_{i j}, and a_{i i} = 1.
    pindices_sym = np.copy(pindices.T)
    mask = pindices_sym != 0
    pindices_sym[mask] = 1/pindices_sym[mask]
    pindices += pindices_sym + np.identity(no_of_periods)
    # Column-wise geometric mean gives the unnormalized price levels.
    pgeo = gmean(pindices)
    # Normalize to the first period.
    return pd.Series(
        pgeo/pgeo[0],
        index=periods,
    )
def time_dummy(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str = 'month',
    product_id_col: str = 'id',
    engine: str = 'numpy'
) -> pd.Series:
    """Obtain the time dummy indices for a given dataframe.

    Calculates the time dummy indices using a formula with weighted least
    squares regression. When passed with characteristics, this function returns
    the Time Dummy Hedonic indices. When passed without it returns the Time
    Product Dummy indices.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe containing the data.
    price_col : str, optional
        Name of the column containing the price information.
    quantity_col : str, optional
        Name of the column containing the quantity information.
    date_col : str, optional
        Name of the column containing the date information.
    product_id_col : str, optional
        Name of the column containing the product id information.
    engine : str, optional
        Name of the engine to use for the calculation.

    Returns
    -------
    pd.Series
        The time dummy indices, with the first period fixed to 1 and
        indexed by the unique periods of ``date_col``.
    """
    # Reverse unstack from dynamic window func.
    df = df.stack().reset_index([date_col, product_id_col])
    # Set the dtype for ID columns, in case it is numerical.
    df[product_id_col] = df[product_id_col].astype(str)
    # The dependent variable is the log of each item's price.
    df['log_price'] = np.log(df[price_col])
    # Get time series for output index.
    time_series = df[date_col].unique()
    # Regression terms; item characteristics would be appended here when
    # available (TDH), otherwise only the product id is used (TPD).
    non_time_vars = [product_id_col]
    model_params = wls(
        df,
        dependent_var='log_price',
        independent_vars=[date_col, *non_time_vars],
        engine=engine
    )
    # Exponentiate the time dummy coefficients & set first period = 1.
    is_time_dummy = model_params.index.str.contains(date_col)
    return pd.Series(
        [1, *np.exp(model_params.loc[is_time_dummy])],
        index=time_series,
    )
def geary_khamis(
df: pd.DataFrame,
price_col: str = 'price',
quantity_col: str = 'quantity',
date_col: str = 'month',
product_id_col: str = 'id',
method_type: str = 'matrix',
) -> List:
r"""Obtain the Geary-Khamis indices for a given dataframe.
Calculates the Geary-Khamis indices using matrix operations.
Parameters
----------
price_col : str, defaults to 'price'
User-defined price column name.
quantity_col : str, defaults to 'quantity'
User-defined quantity column name.
product_id_col : str, defaults to 'product_id'
The column name containing product ID values or product names.
method_type: str, defaults to 'matrix'
Options: {'matrix', 'iterative'}
The method type to use for the GK computation.
Returns
-------
List
The sorted list of indices for each group.
Notes
-----
For Geary-Khamis with the matrix method, we can determine the
quality adjustment factors by solving the system of equations:
.. math::
\vec{b}=\left[I_{N}-C+R\right]^{-1} \vec{c}
where :math:`\vec{c} = [1,0,\ldots, 0]^T` is an :math:`N \times 1`
vector and :math:`R` is an :math:`N \times N` matrix given by,
.. math::
R=\left[\begin{array}{cccc}
1 & 1 & \ldots & 1 \\
0 & \ldots & \ldots & 0 \\
\vdots & & & \vdots \\
0 & \ldots & \ldots & 0
\end{array}\right]
and :math:`C` is the :math:`N \times N` matrix defined by,
.. math::
C=\hat{q}^{-1} \sum_{t=1}^{T} s^{t} q^{t \mathbf{T}}
where :math:`\hat{q}^{-1}` is the inverse of an :math:`N \times N`
diagonal matrix :math:`\hat{q}`, where the diagonal elements are the
total quantities purchased for each good over all time periods,
:math:`s^{t}` is a vector of the expenditure shares for time period
:math:`t`, and :math:`q^{t \mathbf{T}}` is the transpose of the
vector of quantities purchased in time period :math:`t`.
Once the :math:`\vec{b}` vector has been calculated, the price
levels can be computed from the equation:
.. math::
P_{t} =\frac{p^{t} \cdot q^{t}}{ \vec{b} \cdot q^{t}}
The price index values can be determined by normalizing the price
levels by the first period as,
.. math::
I_{t} = \frac{P_{t}}{P_{0}}
References
----------
<NAME>, and <NAME>. (2017). Substitution Bias in
Multilateral Methods for CPI Construction Using Scanner Data.
Discussion Paper 1702. Department of Economics, University of
British Columbia.
"""
if method_type not in ('matrix', 'iterative'):
raise ValueError('The method type must be `matrix` or `iterative`')
# We need to deal with missing values and reshape the df for the
# required vectors and matrices.
df = _matrix_method_reshape(df)
# Get number of unique products for the size of the vectors and
# matrices.
N = len(df.index.unique(level=product_id_col))
# Matrices for the prices, quantities and weights.
prices = df.loc[price_col]
quantities = df.loc[quantity_col]
weights = df.loc['weights']
# Use iterative method directly if specified.
if method_type == 'iterative':
return _geary_khamis_iterative(prices, quantities)
# Inverse of diagonal matrix with total quantities | |
The time at which the partition was created.
- **LastAccessTime** *(datetime) --*
The last time at which the partition was accessed.
- **StorageDescriptor** *(dict) --*
Provides information about the physical location where the partition is stored.
- **Columns** *(list) --*
A list of the ``Columns`` in the table.
- *(dict) --*
A column in a ``Table`` .
- **Name** *(string) --*
The name of the ``Column`` .
- **Type** *(string) --*
The datatype of data in the ``Column`` .
- **Comment** *(string) --*
Free-form text comment.
- **Location** *(string) --*
The physical location of the table. By default this takes the form of the warehouse location, followed by the database location in the warehouse, followed by the table name.
- **InputFormat** *(string) --*
The input format: ``SequenceFileInputFormat`` (binary), or ``TextInputFormat`` , or a custom format.
- **OutputFormat** *(string) --*
The output format: ``SequenceFileOutputFormat`` (binary), or ``IgnoreKeyTextOutputFormat`` , or a custom format.
- **Compressed** *(boolean) --*
True if the data in the table is compressed, or False if not.
- **NumberOfBuckets** *(integer) --*
Must be specified if the table contains any dimension columns.
- **SerdeInfo** *(dict) --*
Serialization/deserialization (SerDe) information.
- **Name** *(string) --*
Name of the SerDe.
- **SerializationLibrary** *(string) --*
Usually the class that implements the SerDe. An example is: ``org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe`` .
- **Parameters** *(dict) --*
These key-value pairs define initialization parameters for the SerDe.
- *(string) --*
- *(string) --*
- **BucketColumns** *(list) --*
A list of reducer grouping columns, clustering columns, and bucketing columns in the table.
- *(string) --*
- **SortColumns** *(list) --*
A list specifying the sort order of each bucket in the table.
- *(dict) --*
Specifies the sort order of a sorted column.
- **Column** *(string) --*
The name of the column.
- **SortOrder** *(integer) --*
Indicates that the column is sorted in ascending order (``== 1`` ), or in descending order (``==0`` ).
- **Parameters** *(dict) --*
User-supplied properties in key-value form.
- *(string) --*
- *(string) --*
- **SkewedInfo** *(dict) --*
Information about values that appear very frequently in a column (skewed values).
- **SkewedColumnNames** *(list) --*
A list of names of columns that contain skewed values.
- *(string) --*
- **SkewedColumnValues** *(list) --*
A list of values that appear so frequently as to be considered skewed.
- *(string) --*
- **SkewedColumnValueLocationMaps** *(dict) --*
A mapping of skewed values to the columns that contain them.
- *(string) --*
- *(string) --*
- **StoredAsSubDirectories** *(boolean) --*
True if the table data is stored in subdirectories, or False if not.
- **Parameters** *(dict) --*
These key-value pairs define partition parameters.
- *(string) --*
- *(string) --*
- **LastAnalyzedTime** *(datetime) --*
The last time at which column statistics were computed for this partition.
:type CatalogId: string
:param CatalogId:
The ID of the Data Catalog where the partitions in question reside. If none is supplied, the AWS account ID is used by default.
:type DatabaseName: string
:param DatabaseName: **[REQUIRED]**
The name of the catalog database where the partitions reside.
:type TableName: string
:param TableName: **[REQUIRED]**
The name of the partitions\' table.
:type Expression: string
:param Expression:
An expression filtering the partitions to be returned.
The expression uses SQL syntax similar to the SQL ``WHERE`` filter clause. The SQL statement parser `JSQLParser <http://jsqlparser.sourceforge.net/home.php>`__ parses the expression.
*Operators* : The following are the operators that you can use in the ``Expression`` API call:
=
Checks if the values of the two operands are equal or not; if yes, then the condition becomes true.
Example: Assume \'variable a\' holds 10 and \'variable b\' holds 20.
(a = b) is not true.
< >
Checks if the values of two operands are equal or not; if the values are not equal, then the condition becomes true.
Example: (a < > b) is true.
>
Checks if the value of the left operand is greater than the value of the right operand; if yes, then the condition becomes true.
Example: (a > b) is not true.
<
Checks if the value of the left operand is less than the value of the right operand; if yes, then the condition becomes true.
Example: (a < b) is true.
>=
Checks if the value of the left operand is greater than or equal to the value of the right operand; if yes, then the condition becomes true.
Example: (a >= b) is not true.
<=
Checks if the value of the left operand is less than or equal to the value of the right operand; if yes, then the condition becomes true.
Example: (a <= b) is true.
AND, OR, IN, BETWEEN, LIKE, NOT, IS NULL
Logical operators.
*Supported Partition Key Types* : The following are the supported partition keys.
* ``string``
* ``date``
* ``timestamp``
* ``int``
* ``bigint``
* ``long``
* ``tinyint``
* ``smallint``
* ``decimal``
If an invalid type is encountered, an exception is thrown.
The following list shows the valid operators on each type. When you define a crawler, the ``partitionKey`` type is created as a ``STRING`` , to be compatible with the catalog partitions.
*Sample API Call* :
:type Segment: dict
:param Segment:
The segment of the table\'s partitions to scan in this request.
- **SegmentNumber** *(integer) --* **[REQUIRED]**
The zero-based index number of this segment. For example, if the total number of segments is 4, SegmentNumber values will range from zero through three.
- **TotalSegments** *(integer) --* **[REQUIRED]**
The total number of segments.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class GetSecurityConfigurations(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`Glue.Client.get_security_configurations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'SecurityConfigurations': [
{
'Name': 'string',
'CreatedTimeStamp': datetime(2015, 1, 1),
'EncryptionConfiguration': {
'S3Encryption': [
{
'S3EncryptionMode': 'DISABLED'|'SSE-KMS'|'SSE-S3',
'KmsKeyArn': 'string'
},
],
'CloudWatchEncryption': {
'CloudWatchEncryptionMode': 'DISABLED'|'SSE-KMS',
'KmsKeyArn': 'string'
},
'JobBookmarksEncryption': {
'JobBookmarksEncryptionMode': 'DISABLED'|'CSE-KMS',
'KmsKeyArn': 'string'
}
}
},
],
}
**Response Structure**
- *(dict) --*
- **SecurityConfigurations** *(list) --*
A list of security configurations.
- *(dict) --*
Specifies a security configuration.
- **Name** *(string) --*
The name of the security configuration.
- **CreatedTimeStamp** *(datetime) --*
The time at which this security configuration was created.
- **EncryptionConfiguration** *(dict) --*
The encryption configuration associated with this security configuration.
- **S3Encryption** *(list) --*
The encryption configuration for S3 data.
- *(dict) --*
Specifies how S3 data should be encrypted.
- **S3EncryptionMode** *(string) --*
The encryption mode to use for S3 data.
- **KmsKeyArn** *(string) --*
The AWS ARN of the KMS key to be used to encrypt the data.
- **CloudWatchEncryption** *(dict) --*
The encryption configuration for CloudWatch.
- **CloudWatchEncryptionMode** *(string) --*
The encryption mode to use for CloudWatch data.
- **KmsKeyArn** *(string) --*
The AWS ARN of the KMS key to be used to encrypt the data.
- **JobBookmarksEncryption** *(dict) --*
The encryption configuration | |
import datetime
import numpy as np
from fixed_params import *
import utils
def get_transition_sigmoid(inflection_day, rate_of_inflection, init_r_0, lockdown_r_0):
    """Build a sigmoid that smooths the transition between two R values.

    The returned callable moves from ``init_r_0`` to ``lockdown_r_0``, with
    the midpoint of the transition at ``inflection_day``.
    ``rate_of_inflection`` controls steepness: values near 1 are very steep;
    0.2-0.5 is typical in our projections.
    """
    assert 0 < rate_of_inflection <= 1, rate_of_inflection
    assert 0 < init_r_0 <= 10, init_r_0
    assert 0 <= lockdown_r_0 <= 10, lockdown_r_0
    # The curve spans a band of height (init_r_0 - lockdown_r_0) sitting on
    # top of lockdown_r_0, shifted so the inflection lands on inflection_day.
    height = init_r_0 - lockdown_r_0
    return utils.inv_sigmoid(inflection_day, rate_of_inflection, height, lockdown_r_0)
class RegionModel:
"""
The main class to capture a region and its single set of parameters.
This object is instantiated and then passed to our SEIR simulator to simulate
infections, hospitalizations and deaths based on the internal parameters.
"""
def __init__(self, country_str, region_str, subregion_str,
first_date, projection_create_date,
projection_end_date,
region_params=dict(),
actual_deaths_smooth=None,
compute_hospitalizations=False):
"""
Parameters
----------
country_str : str
Name of the country (e.g. US, Canada)
region_str : str
Name of the region (e.g. CA, DC)
subregion_str : str
Name of the subregion - county for US, provinces/states for international.
(e.g. Los Angeles County, Alberta)
first_date : datetime.date
First date of the simulation
projection_create_date : datetime.date
The date when the projection is being generated.
This date is usually present day, unless we are doing validation testing,
in which case we use a day in the past so we can compare projections to OOS data.
region_params : dict, optional
Additional metadata for a region, such as population and hospital beds.
actual_deaths_smooth : np.array, optional
Smoothed version of the deaths.
compute_hospitalizations : bool, optional
Whether to compute hospitalization estimates (default False)
"""
self.country_str = country_str
self.region_str = region_str
self.subregion_str = subregion_str
self.first_date = first_date
self.projection_create_date = projection_create_date
self.projection_end_date = projection_end_date
self.region_params = region_params
self.actual_deaths_smooth = actual_deaths_smooth
self.compute_hospitalizations = compute_hospitalizations
self.country_holidays = None
self.N = (self.projection_end_date - self.first_date).days + 1
assert self.N > DAYS_BEFORE_DEATH, 'Need N to be at least DAYS_BEFORE_DEATH'
if projection_create_date:
assert first_date < projection_create_date, \
'First date must be before projection create date'
assert projection_create_date < projection_end_date, \
'Projection create date must be before project end date'
def init_params(self, params_tups):
"""Initializes the object by saving the parameters that are passed in.
This function also builds the R values for each day of the simulation,
as well as the IFR values for each day.
Note: This must be called before running the simulation.
Parameters
----------
params_tups : tuple
This is a tuple of (param_name, param_value) tuples.
Example: (('INITIAL_R_0', 2.2), ('LOCKDOWN_R_0', 0.9), etc.)
"""
assert isinstance(params_tups, tuple), 'must be a tuple of tuples'
for k, v in params_tups:
if k in ['INFLECTION_DAY', 'REOPEN_DATE']:
assert v >= self.first_date, \
f'{k} {v} must be after first date {self.first_date}'
setattr(self, k, v)
assert self.REOPEN_DATE > self.INFLECTION_DAY, \
f'reopen date {self.REOPEN_DATE} must be after inflection day {self.INFLECTION_DAY}'
self.params_tups = params_tups
self.post_reopening_r_decay = self.get_post_reopening_r_decay()
self.R_0_ARR = self.build_r_0_arr()
self.ifr_arr = self.build_ifr_arr()
self.undetected_deaths_ratio_arr = self.build_undetected_deaths_ratio_arr()
def get_post_reopening_r_decay(self):
"""Calculates the post-reopening R decay.
Full description at https://covid19-projections.com/about/#post-reopening
If there is no POST_REOPENING_R_DECAY parameter passed in, we use a random
uniform distribution to generate the post-reopening ratio to model uncertainty.
"""
if hasattr(self, 'POST_REOPENING_R_DECAY'):
return self.POST_REOPENING_R_DECAY
# we randomly sample from a triangular distribution to get the post_reopening_r_decay
if hasattr(self, 'custom_post_reopening_r_decay_range'):
low, mode, high = self.custom_post_reopening_r_decay_range
elif self.country_str in EARLY_IMPACTED_COUNTRIES:
low, mode, high = 0.995, 0.998, 0.999 # mean is ~0.9973
elif self.has_us_seasonality():
low, mode, high = 0.995, 0.9975, 1 # mean is ~0.9975
else:
low, mode, high = 0.996, 0.998, 1 # mean is ~0.998
post_reopening_r_decay = np.random.triangular(low, mode, high)
assert 0 < post_reopening_r_decay <= 1
return post_reopening_r_decay
def get_fall_r_multiplier(self):
"""We currently assume a minor uptick in R in the fall for seasonal countries.
Full description at https://covid19-projections.com/about/#fall-wave
"""
if not self.has_us_seasonality():
return 1
low, mode, high = 1, 1.001, 1.002 # mean is ~1.001
fall_r_mult = np.random.triangular(low, mode, high)
return fall_r_mult
def get_max_post_open_r(self):
"""Return the max post-open R depending on the region type.
Country-wide projections have a lower post-open R due to lower variability.
"""
if self.subregion_str:
return MAX_POST_REOPEN_R + 0.025
elif self.region_str != 'ALL':
return MAX_POST_REOPEN_R + 0.01
else:
return MAX_POST_REOPEN_R
def all_param_tups(self):
"""Returns all parameters as a tuple of (param_name, param_value) tuples."""
all_param_tups = list(self.params_tups[:])
for addl_param in ['post_reopening_r_decay']:
all_param_tups.append((addl_param.upper(), getattr(self, addl_param)))
return tuple(all_param_tups)
    def build_r_0_arr(self):
        """Returns an array of the reproduction numbers (R) for each day.
        Each element in the array represents a single day in the simulation.
        For example, if self.first_date is 2020-03-01 and self.projection_end_date
        is 2020-09-01, then R_0_ARR[10] would be the R value on 2020-03-11.
        Full description at: https://covid19-projections.com/about/#effective-reproduction-number-r
        and https://covid19-projections.com/model-details/#modeling-the-r-value
        We use three different R values: R0, post-mitigation R, and reopening R.
        We use an inverse logistic/sigmoid function to smooth the transition between
        the three R values.
        To compute the reopen R, we apply a multiplier REOPEN_R_MULT to the lockdown R.
        We map this multiplier to reopen_mult, which assumes greater growth if the
        initial lockdown R is effective.
        e.g. 10% growth for R=1->1.1, but 10% growth for R=0.7 -> (2-0.7)**0.5*1.1*.7 = 0.88
        reopen_mult becomes 1 at around R=1.17 (i.e. no increase on reopening)
        Sample code below to compare the difference:
            mult = 1.1
            for lockdown_r in np.arange(0.5,1.21,0.05):
                orig_reopen_r = mult * lockdown_r
                reopen_mult = max(1, (2-lockdown_r)**0.5*mult)
                new_reopen_r = reopen_mult * lockdown_r
                print(lockdown_r, orig_reopen_r, new_reopen_r)
        """
        assert 1 <= self.REOPEN_R_MULT <= 10, self.REOPEN_R_MULT
        # Reopening raises R more when the lockdown R was low (see docstring).
        reopen_mult = max(1, (2-self.LOCKDOWN_R_0)**0.5 * self.REOPEN_R_MULT)
        reopen_r = reopen_mult * self.LOCKDOWN_R_0
        max_post_open_r = self.get_max_post_open_r()
        # Clamp the post-reopening R between the lockdown R and the region cap.
        post_reopening_r = min(max(max_post_open_r, self.LOCKDOWN_R_0), reopen_r)
        assert reopen_r >= self.LOCKDOWN_R_0, 'Reopen R must be >= lockdown R'
        reopen_date_shift = self.REOPEN_DATE + \
            datetime.timedelta(days=int(self.REOPEN_SHIFT_DAYS) + DEFAULT_REOPEN_SHIFT_DAYS)
        # NOTE(review): self.inflection_day_idx and get_day_idx_from_date are
        # defined outside this block -- presumably the day offset of
        # INFLECTION_DAY from first_date; confirm upstream.
        fatigue_idx = self.inflection_day_idx + DAYS_UNTIL_LOCKDOWN_FATIGUE
        reopen_idx = self.get_day_idx_from_date(reopen_date_shift)
        lockdown_reopen_midpoint_idx = (self.inflection_day_idx + reopen_idx) // 2
        if self.LOCKDOWN_R_0 <= 1:
            # we wait longer before applying the post-reopening decay to allow for
            # longer reopening time (since R_t <= 1)
            days_until_post_reopening = 30
        else:
            days_until_post_reopening = 15
        post_reopening_idx = reopen_idx + days_until_post_reopening
        fall_start_idx = self.get_day_idx_from_date(FALL_START_DATE_NORTH)
        # Sigmoids smoothing the three transitions: initial->lockdown R,
        # lockdown fatigue, and lockdown->post-reopening R.
        sig_lockdown = get_transition_sigmoid(
            self.inflection_day_idx, self.RATE_OF_INFLECTION, self.INITIAL_R_0, self.LOCKDOWN_R_0)
        sig_fatigue = get_transition_sigmoid(
            fatigue_idx, 0.2, 1, self.LOCKDOWN_FATIGUE)
        sig_reopen = get_transition_sigmoid(
            reopen_idx, 0.2, self.LOCKDOWN_R_0, post_reopening_r)
        dates = utils.date_range(self.first_date, self.projection_end_date)
        assert len(dates) == self.N
        # how much to drop post_reopening_r R to get to 1 (max 0.9)
        min_post_reopening_total_decay = min(0.9, 1 / post_reopening_r)
        R_0_ARR = [self.INITIAL_R_0]
        for day_idx in range(1, self.N):
            if day_idx < lockdown_reopen_midpoint_idx:
                # Before the lockdown/reopen midpoint: pure lockdown sigmoid.
                r_t = sig_lockdown(day_idx)
            else:
                post_reopening_total_decay = fall_r_mult = 1
                if day_idx > post_reopening_idx:
                    assert day_idx > reopen_idx, day_idx
                    # Compound daily decay, floored so R cannot drop below ~1.
                    post_reopening_total_decay = max(
                        min_post_reopening_total_decay,
                        self.post_reopening_r_decay**(day_idx-post_reopening_idx))
                    assert 0 < post_reopening_total_decay <= 1, post_reopening_total_decay
                if day_idx > fall_start_idx:
                    # NOTE(review): get_fall_r_multiplier() draws a fresh random
                    # sample on every iteration -- confirm this is intended
                    # rather than sampling once before the loop.
                    fall_r_mult = min(
                        1.1, self.get_fall_r_multiplier()**(day_idx-fall_start_idx))
                    assert 1 <= fall_r_mult < 2, fall_r_mult
                r_t = sig_reopen(day_idx) * post_reopening_total_decay * fall_r_mult
            r_t *= sig_fatigue(day_idx)
            # Make sure R is stable
            if day_idx > reopen_idx and abs(r_t / R_0_ARR[-1] - 1) > 0.1:
                assert False, f'R changed too quickly: {day_idx} {r_t} {R_0_ARR}'
            R_0_ARR.append(r_t)
        assert len(R_0_ARR) == self.N
        return R_0_ARR
def build_ifr_arr(self):
"""Returns an array of the infection fatality rates for each day.
Each element in the array represents a single day in the simulation.
For example, if self.first_date is 2020-03-01 and self.projection_end_date
is 2020-09-01, then ifr_arr[10] would be the IFR on 2020-03-11.
Full description at: https://covid19-projections.com/about/#infection-fatality-rate-ifr
"""
assert 0.99 <= MORTALITY_MULTIPLIER <= 1, MORTALITY_MULTIPLIER
assert 0 < self.MORTALITY_RATE < 0.2, self.MORTALITY_RATE
ifr_arr = []
for idx in range(self.N):
if self.country_str in EARLY_IMPACTED_COUNTRIES:
# lower IFR after 45 days due to improving treatments/fewer nursing home deaths
ifr_mult = max(MIN_MORTALITY_MULTIPLIER, MORTALITY_MULTIPLIER**(max(0, idx - 45)))
else:
# slower rise in other countries, so we use 120 days
ifr_mult = max(MIN_MORTALITY_MULTIPLIER, MORTALITY_MULTIPLIER**(max(0, idx - 120)))
assert 0 < MIN_MORTALITY_MULTIPLIER < 1, MIN_MORTALITY_MULTIPLIER
assert MIN_MORTALITY_MULTIPLIER <= ifr_mult <= 1, ifr_mult
ifr = self.MORTALITY_RATE * ifr_mult
ifr_arr.append(ifr)
return ifr_arr
def build_undetected_deaths_ratio_arr(self):
"""Return an array of the percent of deaths that are undetected for each day.
We assume the percentage of undetected deaths will be high in the initial days
| |
<reponame>CodeLionX/CommentSearchEngine
import os
import re
import functools
from cse.lang import PreprocessorBuilder
from cse.lang.PreprocessorStep import PreprocessorStep
from cse.indexing import (FileIndexer, IndexReader, DocumentMap)
from cse.indexing import DOCUMENT_MAP_NAME, DOCUMENT_MAP_DICT_NAME
from cse.reader import CommentReader
from cse.BooleanQueryParser import (BooleanQueryParser, Operator)
from cse.Ranker import Ranker
from cse.util import Util
class SearchEngine():
def __init__(self, directory, commentsFilename, articleFilename, authorFilename):
self.__directory = directory
self.__commentsFilename = commentsFilename
self.__articleFilename = articleFilename
self.__authorFilename = authorFilename
self.__prep = (
PreprocessorBuilder()
.useNltkTokenizer()
.useNltkStopwordList()
.usePorterStemmer()
.addCustomStepToEnd(CustomPpStep())
.build()
)
self.__pattern_whitespace = '[^\S\x0a\x0d]*'
self.__pattern_keyword = '[\w\d]+'
self.__pattern_prefix = self.__pattern_keyword + '\*'
self.__pattern_phrase = '\'' + self.__pattern_keyword + '(' + self.__pattern_whitespace + self.__pattern_keyword + ')*\''
self.__boolQueryPattern = re.compile(
'^(([\w\d*:]+|(' + self.__pattern_phrase + '))'
+ self.__pattern_whitespace
+ '(NOT|OR|AND)'
+ self.__pattern_whitespace
+ '([\w\d*:]+|(' + self.__pattern_phrase + ')))+$',
re.M
)
self.__prefixQueryPattern = re.compile(
'^' + self.__pattern_whitespace + self.__pattern_prefix + self.__pattern_whitespace + '$',
re.I | re.M
)
self.__phraseQueryPattern = re.compile(
'^' + self.__pattern_whitespace + self.__pattern_phrase + self.__pattern_whitespace + '$',
re.I | re.M
)
self.__indexLoaded = False
self.__index = None
self.__documentMap = None
self.__commentReader = None
def loadIndex(self):
if self.__indexLoaded:
print(self.__class__.__name__ + ":", "index already loaded")
return
print(self.__class__.__name__ + ":", "loading index files and comment reader")
self.__index = IndexReader(
self.__directory
)
self.__documentMap = DocumentMap(
os.path.join(self.__directory, DOCUMENT_MAP_NAME),
os.path.join(self.__directory, DOCUMENT_MAP_DICT_NAME)
).open()
self.__commentReader = CommentReader(
os.path.join(self.__directory, self.__commentsFilename),
os.path.join(self.__directory, self.__articleFilename),
os.path.join(self.__directory, self.__authorFilename)
).open()
self.__indexLoaded = True
def releaseIndex(self):
if not self.__indexLoaded:
print(self.__class__.__name__ + ":", "no index loaded, nothing to release")
return
print(self.__class__.__name__ + ":", "releasing index files and comment reader")
self.__index.close()
self.__documentMap.close()
self.__commentReader.close()
self.__indexLoaded = False
def index(self):
if self.__indexLoaded:
self.releaseIndex()
FileIndexer(
self.__directory,
self.__commentsFilename,
self.__articleFilename,
self.__authorFilename,
self.__prep
).index()
    def close(self):
        """Release all open index resources held by the search engine."""
        self.releaseIndex()
    def search(self, query, idsOnly=False, topK=10):
        """Dispatch *query* to the matching search strategy and return results.

        The strategy is chosen by inspecting the query syntax, in this order:
        boolean, phrase, ReplyTo, prefix. Queries containing operator tokens
        that did not match the boolean/prefix grammar are rejected with a
        usage message; everything else falls through to keyword search.

        Parameters
        ----------
        query : str
            The raw user query.
        idsOnly : bool, optional
            When True, return only comment ids; otherwise (id, text) pairs.
        topK : int, optional
            Maximum number of results (not honored by boolean search).
        """
        if not self.__indexLoaded:
            print("Index was not loaded!")
            return []
        results = []
        print()
        # Dispatch order matters: boolean first (its grammar may embed
        # phrases and prefixes), then the more specific query forms.
        if self.__boolQueryPattern.fullmatch(query):
            print("##### Boolean Query Search")
            results = self.__booleanSearch(query)
        elif self.__phraseQueryPattern.fullmatch(query):
            print("##### Phrase Search")
            results = self.__phraseSearch(query, topK)
        elif query.startswith('ReplyTo:'):
            print("##### ReplyTo Search")
            results = self.__replyToSearch(query)
        elif self.__prefixQueryPattern.fullmatch(query):
            print("##### Prefix Search")
            results = self.__prefixSearch(query, topK)
        elif re.search('NOT|AND|OR|[*]', query):
            # Operator tokens present but the query matched no grammar.
            print("*** ERROR ***")
            print("Query >>> {} <<< not supported. Please use the following search options:\n".format(query.strip()) +
                " - Keyword Search: Use one or more words to search for: e.g. `donald trump news`\n" +
                " - Phrase Search: Exact word order match: e.g. `'christams market'`\n" +
                " - ReplyTo Search: Search for replies to a parent comment: e.g. `ReplyTo:12345` (no whitespace allowed between keyword and comment ID, also use exact keyword `ReplyTo` [case sensitive])\n" +
                " - Prefix Search: Search for comments containing words with a specifc prefix: e.g. `euro*`\n" +
                " - Boolean Search: Please use only one of the following binary Operators or none: `NOT`, `AND`, `OR`. You are allowed to use single-keyword-queries, prefix-queries, replyTo-queries and phrase-queries between binary operators"
            )
            return []
        else:
            print("##### Keyword Search")
            results = self.__keywordSearch(query, topK)
        print("##### Query for >>>", query.strip(), "<<< returned", len(results), "of k=" + str(topK) + "(not considered for boolean search) requested comments")
        # print("      CIDs:", results)
        if idsOnly:
            return results
        else:
            # Pair each comment id with its text, lazily via zip.
            return zip(results, self.__loadDocumentTextForCids(results))
    def __booleanSearch(self, query):
        """Evaluate a boolean query and return the matching comment id set.

        Only one operator kind (NOT, OR or AND) may appear per query; each
        operand may itself be a single keyword, a prefix query, a ReplyTo
        query or a phrase query. Returns [] on any unsupported input.
        """
        # operator precedence: NOT > AND > OR
        p = BooleanQueryParser(query).get()
        cidSets = []
        # filter out operators
        # note: we only support one operator kind per query at the moment!
        op = None
        if Operator.NOT in p: op = Operator.NOT
        elif Operator.OR in p: op = Operator.OR
        elif Operator.AND in p: op = Operator.AND
        else:
            print("No or wrong operator in query!!")
            return []
        # check if query contains any other operator (we do not support this!)
        for operator in set(Operator)-set([op]):
            if operator in p:
                print("Only a single Operator-Type is supported in boolean search queries! Found:", op, operator)
                return []
        terms = [term for term in p if term not in Operator]
        # load document set per term; each operand resolves to a cid set
        for term in terms:
            cids = []
            if term.strip().endswith("*"):
                cids = self.__prefixSearchTerm(term)
            elif term.strip().startswith("ReplyTo:"):
                cids = self.__replyToSearch(term)
            elif self.__phraseQueryPattern.fullmatch(term):
                # topK=None: phrase search without the top-k restriction
                cids = self.__phraseSearch(term, None)
            else:
                pTerm = self.__prep.processText(term)
                if not pTerm or len(pTerm) > 1:
                    print(
                        self.__class__.__name__ + ":", "term", term,
                        "is invalid! Please use only one word for boolean queries."
                    )
                    return []
                cids = [cid for cid, _, _ in self.__index.postingList(pTerm[0][0])]
            if cids:
                cidSets.append(set(cids))
        if not cidSets:
            return []
        # we are able to sort the cid sets based on their length, because we only have one kind of operators
        # this should speed up the combination a little bit
        # this would lead to wrong results with the NOT operator
        if op != Operator.NOT:
            cidSets.sort(key=lambda cidSet: len(cidSet))
        # Fold the remaining sets into the first with the operator's combiner.
        firstCids = cidSets[0]
        cidSets.remove(firstCids)
        cids = functools.reduce(self.__cidSetCombiner(op), cidSets, firstCids)
        return cids
    def __phraseSearch(self, query, topK):
        """Search for an exact phrase; returns the set of matching comment ids.

        Candidates are first narrowed to documents whose preprocessed terms
        occur in consecutive positions, then re-checked against the raw
        document text (with stopwords) so only full-phrase matches survive.
        `topK` is accepted for interface symmetry but ranking is unbounded here.
        """
        queryTermTuples = self.__prep.processText(query.replace("'", ""))
        queryTerms = [term for term, _ in queryTermTuples]
        # use ranking:
        idfs = {}
        ranker = Ranker(None) # do not use topK restriction!
        # determine documents with ordered consecutive query terms
        first = True
        cidPosTuples = {}
        for term in queryTerms:
            postingListEntry = self.__index.retrieve(term)
            if postingListEntry.idf():
                idfs[term] = postingListEntry.idf()
            if not postingListEntry.postingList():
                # A term with no postings means the phrase cannot match at all.
                # NOTE(review): the next assignment is dead -- the method
                # returns on the following line, so clearing the dict has no
                # effect. Kept as-is (doc-only change).
                cidPosTuples = {}
                return []
            elif first:
                # The first term seeds the candidate set with its position lists.
                for cid, tf, posList in postingListEntry.postingList():
                    cidPosTuples[cid] = posList
                    ranker.documentTerm(cid, term, tf, postingListEntry.idf())
                first = False
            else:
                # Each further term keeps only documents where it appears
                # directly after the phrase prefix matched so far.
                newCidPosTuples = {}
                for cid, tf, posList in postingListEntry.postingList():
                    newCidPosTuples[cid] = posList
                    ranker.documentTerm(cid, term, tf, postingListEntry.idf())
                cidPosTuples = self.__documentsWithConsecutiveTerms(cidPosTuples, newCidPosTuples)
        ranker.queryTerms(queryTerms, idfs)
        ranker.filterDocumentTermWeightsBy(lambda cid: cid in cidPosTuples)
        rankedCids = ranker.rank()
        # look for stopwords and filter out all docs not containing the whole phrase
        # this preserves rank ordering!
        tokenizer = PreprocessorBuilder().useNltkTokenizer().build()
        termsWithSW = [term for term, index in tokenizer.processText(query)]
        filteredRankedCids = []
        for rank, score, rankedCid in rankedCids:
            text = self.__loadDocumentTextForCids([rankedCid])[0]
            documentTerms = [term for term, index in tokenizer.processText(text)]
            if Util.seq_in_seq(termsWithSW, documentTerms):
                filteredRankedCids.append((rank, score, rankedCid))
        rankedCids = filteredRankedCids
        return set([cid for _, _, cid in rankedCids])
def __replyToSearch(self, query):
queryParts = query.strip().split(':')
if len(queryParts) > 2:
print(
self.__class__.__name__ + ":", query,
"is invalid! Please use only reply to queries with the following schema:",
"`ReplyTo:<numeric comment id>`, eg. `ReplyTo:12345`"
)
return []
try:
parentCid = int(queryParts[1])
except ArithmeticError:
print(
self.__class__.__name__ + ":", query,
"is invalid! Please use only reply to queries with the following schema:",
"`ReplyTo:<numeric comment id>`, eg. `ReplyTo:12345`"
)
# load child cids and return them
return self.__index.repliedTo(parentCid)
def __keywordSearch(self, query, topK):
idfs = {}
queryTerms = []
# use ranking:
ranker = Ranker(topK)
queryTermTuples = self.__prep.processText(query)
queryTerms = [term for term, _ in queryTermTuples]
for term in queryTerms:
postingListEntry = self.__index.retrieve(term)
if postingListEntry.idf():
idfs[term] = postingListEntry.idf()
if postingListEntry.postingList():
for cid, tf, _ in postingListEntry.postingList():
ranker.documentTerm(cid, term, tf, postingListEntry.idf())
# calculate query term weights
ranker.queryTerms(queryTerms, idfs)
rankedCids = ranker.rank()
#for rank, score, cid in rankedCids:
# print(rank, score, cid)
return set([cid for _, _, cid in rankedCids])
def __prefixSearch(self, term, topK):
term = term.replace("*", "")
# get prefix matching terms
matchedTerms = [token for token in self.__index.terms() if token.startswith(term)]
ranker = Ranker(topK)
# load posting list
idfs = {}
for t in matchedTerms:
postingListEntry = self.__index.retrieve(t)
if postingListEntry.idf():
idfs[t] = postingListEntry.idf()
if postingListEntry.postingList():
for cid, tf, _ in postingListEntry.postingList():
ranker.documentTerm(cid, t, tf, postingListEntry.idf())
ranker.queryTerms(matchedTerms, idfs)
rankedCids = ranker.rank()
return set([cid for _, _, cid in rankedCids])
def __prefixSearchTerm(self, term):
"""
This should only used internally. No ranking is performed. All additional information
of the index is lost (like tf, idf, positions, ...).
Only returns all document IDs containing at least one matched term.
"""
term = term.replace("*", "")
# get prefix matching terms
matchedTerms = [token for token in self.__index.terms() if token.startswith(term)]
cids = set()
for t in matchedTerms:
cids.update((cid for cid, _, _ in self.__index.postingList(t)))
return cids
def __loadDocumentTextForCids(self, cids):
results = []
if cids is None or cids is []:
return results
# get document pointers and load comment texts
for cid in cids:
try:
pointer = self.__documentMap.get(cid)
rowData = self.__commentReader.readline(pointer)
results.append(rowData["comment_text"])
except KeyError:
print(self.__class__.__name__ + ":", "comment", cid, "not found!")
return results
def __documentsWithConsecutiveTerms(self, firstTermTuples, secondTermTuples):
# documents containing both terms:
cids = [cid for cid in firstTermTuples if cid in secondTermTuples]
# check for consecutive term positions
resultCidTuples = {}
for cid in cids:
for pos in firstTermTuples[cid]:
if pos+1 in | |
# <gh_stars>10-100  (scraper artifact; commented out so the file remains valid Python)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for playback_client."""
import json
from typing import Callable, List, Optional, Tuple, TypeVar, Union
import unittest
from pyreach import core
from pyreach.common.python import types_gen
from pyreach.impl import playback_client
from pyreach.impl import test_utils
from pyreach.impl import utils
# Some example (time, sequence) pairs for testing the seek() function for data
# elements that do not actually exist.
# Each entry is a (time, sequence) pair; None presumably means "unspecified"
# for that dimension of the seek -- confirm against seek()'s contract.
_test_invalid_seqs: List[Tuple[Optional[float], Optional[int]]] = [
    (0.0, 0),
    (1.0, 0),
    (1.0, 1),
    (2.001, 3),
    (2.0, 2),
    (1.001, None),
    (1.2, None),
    (None, 4),
    (None, 0),
    (0.0, None),
]
class PlaybackClientTest(unittest.TestCase):
"""Implement a test of the playback client and subcomponents.
Performs a detailed test of the playback client logic by testing each
component against simulated device-data and command-data logs. In addition,
there are some re-usable functions for testing implementations of the
iterators and playback clients.
"""
  def test_client_simulator_empty(self) -> None:
    """Test the client simulator with a log that contains no clients.

    Variant one of four - a completely empty log.
    """
    # Create empty log.
    device_data = TestDeviceDataIterator([])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # Client simulator will fail to find a client, since the log is empty.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator, None,
                      device_data, command_data, True)
    # Rewind both iterators so the second construction attempt scans the log
    # from the beginning again.
    device_data.reset()
    command_data.reset()
    # Client simulator will fail to find the "invalid" client, since the log
    # is empty and contains no clients.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator_empty_no_object(self) -> None:
    """Test the client simulator with a log that contains no clients.

    Variant two of four - a client state data missing the object.
    (Docstring fix: all four variants previously said "Variant one".)
    """
    # Create a log with no clients despite having an empty client state object.
    device_data = TestDeviceDataIterator([
        types_gen.DeviceData(
            device_type="session-manager", data_type="connected-clients")
    ])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # Client simulator will fail to find a client, since the log is empty.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator, None,
                      device_data, command_data, True)
    device_data.reset()
    command_data.reset()
    # Client simulator will fail to find the "invalid" client, since the log
    # is empty and contains no clients.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator_empty_object(self) -> None:
    """Test the client simulator with a log that contains no clients.

    Variant three of four - a client state data with no client list.
    (Docstring fix: all four variants previously said "Variant one".)
    """
    # Create a log with no clients despite having an empty client state object.
    device_data = TestDeviceDataIterator([
        types_gen.DeviceData(
            device_type="session-manager",
            data_type="connected-clients",
            connected_clients=types_gen.ConnectedClients())
    ])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # Client simulator will fail to find a client, since the log is empty.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator, None,
                      device_data, command_data, True)
    device_data.reset()
    command_data.reset()
    # Client simulator will fail to find the "invalid" client, since the log
    # is empty and contains no clients.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator_empty_object_list(self) -> None:
    """Test the client simulator with a log that contains no clients.

    Variant four of four - a client state data with an empty client list.
    (Docstring fix: all four variants previously said "Variant one".)
    """
    # Create a log with no clients despite having an empty client state object.
    device_data = TestDeviceDataIterator([
        types_gen.DeviceData(
            device_type="session-manager",
            data_type="connected-clients",
            connected_clients=types_gen.ConnectedClients([]))
    ])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # Client simulator will fail to find a client, since the log is empty.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator, None,
                      device_data, command_data, True)
    device_data.reset()
    command_data.reset()
    # Client simulator will fail to find the "invalid" client, since the log
    # is empty and contains no clients.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator_find_client(self) -> None:
    """Test the client simulator searching for a client in data."""
    device_data = TestDeviceDataIterator([
        types_gen.DeviceData(
            device_type="session-manager",
            data_type="connected-clients",
            connected_clients=types_gen.ConnectedClients([
                types_gen.ConnectedClient(uid="client-1"),
                types_gen.ConnectedClient(uid="client-2"),
            ]))
    ])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # We expect the client simulator to find the first client, which is
    # "client-1" since it appears first in the data (no client has
    # is_current set here).
    sim = playback_client.ClientSimulator(None, device_data, command_data, True)
    self.assertEqual(sim.client_id, "client-1")
    # Rewind the iterators before constructing a second simulator.
    device_data.reset()
    command_data.reset()
    # Failure is expected when searching for the non-existent client.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator_find_client_connected(self) -> None:
    """Test the client simulator search with is_current in data."""
    device_data = TestDeviceDataIterator([
        types_gen.DeviceData(
            device_type="session-manager",
            data_type="connected-clients",
            connected_clients=types_gen.ConnectedClients([
                types_gen.ConnectedClient(uid="client-1"),
                types_gen.ConnectedClient(uid="client-2", is_current=True),
            ]))
    ])
    command_data = TestCommandDataIterator([])
    device_data.start()
    command_data.start()
    # We expect the client simulator to select "client-2": although it is
    # listed second, it has is_current = True in the device data.
    sim = playback_client.ClientSimulator(None, device_data, command_data, True)
    self.assertEqual(sim.client_id, "client-2")
    device_data.reset()
    command_data.reset()
    # Failure is expected when searching for the non-existent client.
    self.assertRaises(core.PyReachError, playback_client.ClientSimulator,
                      "invalid", device_data, command_data, True)
  def test_client_simulator(self) -> None:
    """Run the full client-simulator filter test with client-side logs allowed."""
    self._test_client_simulator(True)
  def test_client_simulator_client(self) -> None:
    """Run the full client-simulator filter test with client-side logs disallowed."""
    self._test_client_simulator(False)
def _test_client_simulator(self, allow_client_logs: bool) -> None:
"""Test the filtering logic of the client simulator.
Args:
allow_client_logs: simulate logs from the client side.
"""
# Create a log where there are two test clients, one of which ("client-2")
# starts and stops.
device_data = TestDeviceDataIterator([
types_gen.DeviceData(
ts=1000,
seq=1,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1"),
types_gen.ConnectedClient(uid="client-2", is_current=True),
])),
types_gen.DeviceData(
ts=10000,
seq=2,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1"),
]))
])
command_data = TestCommandDataIterator([
types_gen.CommandData(
device_type="robot",
data_type="frame-request",
tag="test-allow",
origin_client="client-2")
])
device_data.start()
command_data.start()
# Test the simulator with client-2
sim = playback_client.ClientSimulator(None, device_data, command_data,
allow_client_logs)
self.assertEqual(sim.client_id, "client-2")
# Data equal test functions that also run the data through the client
# simulator filter function.
def same_data(data: types_gen.DeviceData,
cmp_data: Optional[types_gen.DeviceData] = None) -> None:
if not cmp_data:
cmp_data = utils.copy_device_data(data)
tf = sim.transform_data(data)
self.assertIsNotNone(tf)
assert tf
log_data_equal(self, tf, cmp_data)
def same_cmd(cmd: types_gen.CommandData,
cmp_cmd: Optional[types_gen.CommandData] = None) -> None:
if not cmp_cmd:
cmp_cmd = utils.copy_command_data(cmd)
tf = sim.transform_command(cmd)
self.assertIsNotNone(tf)
assert tf
log_data_equal(self, tf, cmp_cmd)
# Test data this is within the client-2 session and will pass through.
same_data(
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
same_data(
types_gen.DeviceData(
ts=10000,
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
# Test some data with out-of-bounds timestamps.
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=999,
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=10001,
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
# Data with inhibit_frame_send = True is not allowed through.
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=1000,
inhibit_frame_send=True,
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
# Data without the correct send_to_client client ID is not allowed.
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=1000,
send_to_clients=[
types_gen.SendToClient(tag="test", uid="client-1")
],
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=1000,
tag="test",
send_to_clients=[types_gen.SendToClient(uid="client-1")],
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
# Data with the correct send_to_clients should be allowed and re-tagged.
# The send_to_clients list is also removed from the data during transform.
same_data(
types_gen.DeviceData(
ts=1,
tag="",
send_to_clients=[
types_gen.SendToClient(tag="test-1", uid="client-1"),
types_gen.SendToClient(tag="test-2", uid="client-2")
],
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"),
types_gen.DeviceData(
ts=1,
tag="test-2",
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
same_data(
types_gen.DeviceData(
ts=1,
tag="",
send_to_clients=[
types_gen.SendToClient(tag="test-1", uid="client-1"),
types_gen.SendToClient(tag="test-allow", uid="client-2")
],
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"),
types_gen.DeviceData(
ts=1,
tag="test-allow",
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
same_data(
types_gen.DeviceData(
ts=1,
tag="test-tag",
send_to_clients=[
types_gen.SendToClient(tag="test-1", uid="client-1"),
types_gen.SendToClient(tag="", uid="client-2")
],
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"),
types_gen.DeviceData(
ts=1,
tag="",
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
# Data is allowed if its tag was within the sent command data being played
# back by the server.
same_data(
types_gen.DeviceData(
ts=1,
tag="test-allow",
seq=4,
device_type="robot",
device_name="",
data_type="robot-state"))
# Tag not in command-data, disallowed.
self.assertIsNone(
sim.transform_data(
types_gen.DeviceData(
ts=1000,
tag="test-block",
seq=4,
device_type="robot",
device_name="",
data_type="robot-state")))
# Test sending connected_clients data. The connected_clients list will be
# transformed to simulate the client's perspective - is_current will be
# set on the current client ("client-2"), all others set to false.
same_data(
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1", is_current=True),
types_gen.ConnectedClient(uid="client-2"),
])),
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1"),
types_gen.ConnectedClient(uid="client-2", is_current=True),
])))
same_data(
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1"),
types_gen.ConnectedClient(uid="client-2"),
])),
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-1"),
types_gen.ConnectedClient(uid="client-2", is_current=True),
])))
same_data(
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-2"),
types_gen.ConnectedClient(uid="client-1"),
])),
types_gen.DeviceData(
ts=1000,
seq=4,
device_type="session-manager",
data_type="connected-clients",
connected_clients=types_gen.ConnectedClients([
types_gen.ConnectedClient(uid="client-2", is_current=True),
types_gen.ConnectedClient(uid="client-1"),
])))
# Test the client simulator filtering command-data.
empty_origin_client_cmds = [
types_gen.CommandData(
device_type="robot", data_type="frame-request", tag="test-allow"),
types_gen.CommandData(
device_type="robot", data_type="frame-request", tag="test"),
types_gen.CommandData(
device_type="robot", data_type="frame-request", tag=""),
]
if allow_client_logs:
# If client logs are allowed, then empty commands will be allowed
for cmd in empty_origin_client_cmds:
| |
if m:
# Get keys
if m.groupdict()['status_codes']:
status_codes = m.groupdict()['status_codes']
path_type = ''
if m.groupdict()['path_type']:
path_type = str(m.groupdict()['path_type'])
if path_type:
status_codes = status_codes + path_type
else:
status_codes = status_codes.rstrip()
if m.groupdict()['prefix']:
prefix = str(m.groupdict()['prefix'])
index = 0
# Network Next Hop Metric LocPrf Weight Path
# * i 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
# 0.0.0.0 0 32768 ?
# *> 0.0.0.0 0 32768 ?
# * i fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
m = p3_2.match(line)
if m:
# Get keys
path_type = ""
path_info = ""
if m.groupdict()['status_codes']:
status_codes = m.groupdict()['status_codes']
if m.groupdict()['path_type']:
path_type = m.groupdict()['path_type']
if m.groupdict()['next_hop']:
next_hop = m.groupdict()['next_hop']
if path_type:
status_codes = status_codes + path_type
else:
status_codes = status_codes.rstrip()
if m.groupdict()['termination']:
termination = m.groupdict()['termination']
m3 = re.compile(r'(?: *(?P<path>[0-9\{\}\s]+))?'
' +(?P<origin_codes>(i|e|\?|\|))$').match(termination)
if m3 and m3.groupdict()['path']:
path_info = m3.groupdict()['path']
if m3 and m3.groupdict()['origin_codes']:
origin_codes_info = m3.groupdict()['origin_codes']
if m.groupdict()['metric']:
metric = int(m.groupdict()['metric'])
if m.groupdict()['weight']:
weight = int(m.groupdict()['weight'])
if m.groupdict()['local_prf']:
localpref = int(m.groupdict()['local_prf'])
index += 1
# Init dict
if 'vrf' not in route_dict:
route_dict['vrf'] = {}
if vrf not in route_dict['vrf']:
route_dict['vrf'][vrf] = {}
if 'address_family' not in route_dict['vrf'][vrf]:
route_dict['vrf'][vrf]['address_family'] = {}
if address_family not in route_dict['vrf'][vrf]['address_family']:
route_dict['vrf'][vrf]['address_family'][address_family] = {}
# Set af_dict
af_dict = route_dict['vrf'][vrf]['address_family'][address_family]
if 'routes' not in af_dict:
af_dict['routes'] = {}
if prefix not in af_dict['routes']:
af_dict['routes'][prefix] = {}
if 'index' not in af_dict['routes'][prefix]:
af_dict['routes'][prefix]['index'] = {}
if index not in af_dict['routes'][prefix]['index']:
af_dict['routes'][prefix]['index'][index] = {}
if index not in af_dict['routes'][prefix]['index']:
af_dict['routes'][prefix]['index'][index] = {}
# Set keys
if status_codes:
af_dict['routes'][prefix]['index'][index]['status_codes'] = status_codes
if m.groupdict()['next_hop']:
af_dict['routes'][prefix]['index'][index]['next_hop'] = next_hop
if m.groupdict()['local_prf']:
af_dict['routes'][prefix]['index'][index]['localpref'] = localpref
if m.groupdict()['weight']:
af_dict['routes'][prefix]['index'][index]['weight'] = weight
if m.groupdict()['metric']:
af_dict['routes'][prefix]['index'][index]['metric'] = metric
if path_info:
af_dict['routes'][prefix]['index'][index]['path'] = path_info
if origin_codes_info:
af_dict['routes'][prefix]['index'][index]['origin_codes'] = origin_codes_info
continue
# Network Next Hop Metric LocPrf Weight Path
# * 10.36.3.0/24 10.36.3.254 0 0 65530 ?
# *> 10.1.1.0/24 0.0.0.0 0 32768 ?
# *>i 10.1.2.0/24 10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
# *>i 615:11:11::/64 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:10.4.1.1 2219 100 0 200 33299 51178 47751 {27016} e
# *> 100:2051:VEID-2:Blk-1/136
m = p4.match(line)
if m:
path_type = ""
path_data = ""
if m.groupdict()['prefix']:
prefix = m.groupdict()['prefix']
index = 1
# Get keys
if m.groupdict()['status_codes']:
status_codes = m.groupdict()['status_codes']
if m.groupdict()['path_type']:
path_type = m.groupdict()['path_type']
if path_type:
status_codes = status_codes + path_type
else:
status_codes = status_codes.rstrip()
if m.groupdict()['path']:
path_1 = m.groupdict()['path']
m3 = re.compile(r'(?: *(?P<path_inner>[0-9\{\}\s]+))?'
' +(?P<origin_codes_inner>(i|e|\?|\|))$').match(path_1)
if m3:
path_data = m3.groupdict()['path_inner']
origin_codes_data = m3.groupdict()['origin_codes_inner']
if m.groupdict()['next_hop']:
next_hop = m.groupdict()['next_hop']
if m.groupdict()['metric']:
metric = int(m.groupdict()['metric'])
if m.groupdict()['weight']:
weight = int(m.groupdict()['weight'])
if m.groupdict()['local_prf']:
localpref = int(m.groupdict()['local_prf'])
# Init dict
if 'vrf' not in route_dict:
route_dict['vrf'] = {}
if vrf not in route_dict['vrf']:
route_dict['vrf'][vrf] = {}
if 'address_family' not in route_dict['vrf'][vrf]:
route_dict['vrf'][vrf]['address_family'] = {}
if address_family not in route_dict['vrf'][vrf]['address_family']:
route_dict['vrf'][vrf]['address_family'][address_family] = {}
# Set af_dict
af_dict = route_dict['vrf'][vrf]['address_family'][address_family]
if 'routes' not in af_dict:
af_dict['routes'] = {}
if prefix not in af_dict['routes']:
af_dict['routes'][prefix] = {}
if 'index' not in af_dict['routes'][prefix]:
af_dict['routes'][prefix]['index'] = {}
if index not in af_dict['routes'][prefix]['index']:
af_dict['routes'][prefix]['index'][index] = {}
if index not in af_dict['routes'][prefix]['index']:
af_dict['routes'][prefix]['index'][index] = {}
# Set keys
if status_codes:
af_dict['routes'][prefix]['index'][index]['status_codes'] = status_codes
if path_data:
af_dict['routes'][prefix]['index'][index]['path'] = path_data
if m.groupdict()['next_hop']:
af_dict['routes'][prefix]['index'][index]['next_hop'] = next_hop
if m.groupdict()['local_prf']:
af_dict['routes'][prefix]['index'][index]['localpref'] = localpref
if m.groupdict()['weight']:
af_dict['routes'][prefix]['index'][index]['weight'] = weight
if m.groupdict()['metric']:
af_dict['routes'][prefix]['index'][index]['metric'] = metric
if origin_codes_data:
af_dict['routes'][prefix]['index'][index]['origin_codes'] = origin_codes_data
continue
# AF-Private Import to Address-Family: L2VPN E-VPN, Pfx Count/Limit: 2/1000
m = p5.match(line)
if m:
af_private_import_to_address_family = m.groupdict()['af_private_import_to_address_family']
pfx_count = int(m.groupdict()['pfx_count'])
pfx_limit = int(m.groupdict()['pfx_limit'])
if 'vrf' not in route_dict:
route_dict['vrf'] = {}
if vrf not in route_dict['vrf']:
route_dict['vrf'][vrf] = {}
if 'address_family' not in route_dict['vrf'][vrf]:
route_dict['vrf'][vrf]['address_family'] = {}
if new_address_family not in route_dict['vrf'][vrf]['address_family']:
route_dict['vrf'][vrf]['address_family'][new_address_family] = {}
route_dict['vrf'][vrf]['address_family'][new_address_family] \
['af_private_import_to_address_family'] = af_private_import_to_address_family
route_dict['vrf'][vrf]['address_family'][new_address_family] \
['pfx_count'] = pfx_count
route_dict['vrf'][vrf]['address_family'][new_address_family] \
['pfx_limit'] = pfx_limit
continue
# Route Distinguisher: 200:1
# Route Distinguisher: 300:1 (default for vrf VRF1) VRF Router ID 10.94.44.44
m = p6.match(line)
if m:
route_distinguisher = str(m.groupdict()['route_distinguisher'])
new_address_family = original_address_family + ' RD ' + route_distinguisher
# Init dict
if m.groupdict()['default_vrf']:
vrf = m.groupdict()['default_vrf']
if 'vrf' not in route_dict:
route_dict['vrf'] = {}
if vrf not in route_dict['vrf']:
route_dict['vrf'][vrf] = {}
if 'address_family' not in route_dict['vrf'][vrf]:
route_dict['vrf'][vrf]['address_family'] = {}
if new_address_family not in route_dict['vrf'][vrf]['address_family']:
route_dict['vrf'][vrf]['address_family'][new_address_family] = {}
# Set keys
route_dict['vrf'][vrf]['address_family'][new_address_family]\
['bgp_table_version'] = bgp_table_version
route_dict['vrf'][vrf]['address_family'][new_address_family]\
['route_identifier'] = local_router_id
route_dict['vrf'][vrf]['address_family'][new_address_family]\
['route_distinguisher'] = route_distinguisher
if vrf:
route_dict['vrf'][vrf]['address_family'][new_address_family]['default_vrf'] = \
vrf
if m.groupdict()['vrf_router_id']:
route_dict['vrf'][vrf]['address_family'][new_address_family]['vrf_route_identifier'] = \
str(m.groupdict()['vrf_router_id'])
# Reset address_family key and af_dict for use in other regex
address_family = new_address_family
af_dict = route_dict['vrf'][vrf]['address_family'][address_family]
# Init routes dict
if 'routes' not in af_dict:
del af_dict
continue
return route_dict
# ===================================
# Parser for:
# * 'show bgp all'
# * 'show bgp {address_family} all'
# ===================================
class ShowBgpAll(ShowBgpSuperParser, ShowBgpSchema):
    ''' Parser for:
        * 'show bgp all'
        * 'show bgp {address_family} all'
    '''

    cli_command = ['show bgp {address_family} all',
                   'show bgp all',
                   ]
    exclude = ['bgp_table_version']

    def cli(self, address_family='', output=None):
        """Collect (or reuse) device output and delegate parsing to the superparser.

        'ipv4 unicast' and 'ipv6 unicast' are not accepted by this command
        form, so an empty dict is returned for them without contacting the
        device.
        """
        restricted_list = ['ipv4 unicast', 'ipv6 unicast']
        if output is not None:
            show_output = output
        elif not address_family:
            show_output = self.device.execute(self.cli_command[1])
        elif address_family in restricted_list:
            return {}
        else:
            show_output = self.device.execute(
                self.cli_command[0].format(address_family=address_family))
        return super().cli(output=show_output, address_family=address_family)
# ======================================
# Parser for:
# * 'show ip bgp all'
# * 'show ip bgp {address_family} all'
# ======================================
class ShowIpBgpAll(ShowBgpSuperParser, ShowBgpSchema):
    ''' Parser for:
        * 'show ip bgp all'
        * 'show ip bgp {address_family} all'
    '''

    cli_command = ['show ip bgp {address_family} all',
                   'show ip bgp all',
                   ]

    def cli(self, address_family='', output=None):
        """Collect (or reuse) device output and delegate parsing to the superparser."""
        if output is not None:
            show_output = output
        else:
            # Choose the address-family-specific form when one was requested.
            if address_family:
                command = self.cli_command[0].format(
                    address_family=address_family)
            else:
                command = self.cli_command[1]
            show_output = self.device.execute(command)
        return super().cli(output=show_output, address_family=address_family)
# =============================================
# Parser for:
# * 'show bgp {address_family} rd {rd}'
# * 'show bgp {address_family} vrf {vrf}'
# =============================================
class ShowBgp(ShowBgpSuperParser, ShowBgpSchema):
    ''' Parser for:
        * 'show bgp {address_family} rd {rd}'
        * 'show bgp {address_family} vrf {vrf}'
    '''

    cli_command = ['show bgp {address_family} vrf {vrf}',
                   'show bgp {address_family} rd {rd}',
                   ]

    def cli(self, address_family='', rd='', vrf='', output=None):
        """Execute (or reuse) the show command and parse it via the superparser.

        Args:
            address_family: BGP address family (required when output is None).
            rd: route distinguisher (alternative to vrf).
            vrf: VRF name (alternative to rd).
            output: pre-collected device output; skips device execution.

        Raises:
            ValueError: if output is None and the arguments do not select one
                of the two supported command forms.
        """
        if output is None:
            # Build command
            if address_family and vrf:
                cmd = self.cli_command[0].format(address_family=address_family,
                                                 vrf=vrf)
            elif address_family and rd:
                cmd = self.cli_command[1].format(address_family=address_family,
                                                 rd=rd)
            else:
                # BUG FIX: the original fell through here with `cmd` unbound
                # and crashed with UnboundLocalError on device.execute(cmd);
                # fail with an explicit, actionable error instead.
                raise ValueError(
                    "ShowBgp.cli requires 'address_family' together with "
                    "either 'vrf' or 'rd' when no output is supplied")
            # Execute command
            show_output = self.device.execute(cmd)
        else:
            show_output = output
        # Call super
        return super().cli(output=show_output, vrf=vrf,
                           address_family=address_family)
# =============================================
# Parser for:
# * 'show ip bgp'
# * 'show ip bgp {address_family}'
# * 'show ip bgp {address_family} rd {rd}'
# * 'show ip bgp {address_family} vrf {vrf}'
# =============================================
class ShowIpBgp(ShowBgpSuperParser, ShowBgpSchema):
    ''' Parser for:
        * 'show ip bgp'
        * 'show ip bgp {address_family}'
        * 'show ip bgp {address_family} rd {rd}'
        * 'show ip bgp {address_family} vrf {vrf}'
    '''

    cli_command = ['show ip bgp {address_family} vrf {vrf}',
                   'show ip bgp {address_family} rd {rd}',
                   'show ip bgp {address_family}',
                   'show ip bgp',
                   ]

    def cli(self, address_family='', rd='', vrf='', output=None):
        """Execute (or reuse) the matching show command and parse it via the superparser."""
        if output is not None:
            show_output = output
        else:
            # Pick the most specific command form the arguments allow.
            if vrf and address_family:
                cmd = self.cli_command[0].format(address_family=address_family,
                                                 vrf=vrf)
            elif rd and address_family:
                cmd = self.cli_command[1].format(address_family=address_family,
                                                 rd=rd)
            elif address_family:
                cmd = self.cli_command[2].format(address_family=address_family)
            else:
                cmd = self.cli_command[3]
            show_output = self.device.execute(cmd)
        return super().cli(output=show_output, vrf=vrf,
                           address_family=address_family)
#-------------------------------------------------------------------------------
# ======================================================
# Schema for:
# * 'show bgp all detail'
# * 'show ip bgp all detail'
# * 'show bgp {address_family} vrf {vrf} detail'
# * 'show bgp {address_family} rd {rd} detail'
# * 'show ip bgp {address_family} vrf {vrf} detail'
# * 'show ip bgp {address_family} rd {rd} detail'
# ======================================================
class ShowBgpAllDetailSchema(MetaParser):
''' Schema for:
* 'show bgp all detail'
* 'show ip bgp all detail'
* 'show bgp {address_family} vrf {vrf} detail'
* 'show bgp {address_family} rd {rd} detail'
* 'show ip bgp {address_family} vrf {vrf} detail'
* 'show ip bgp {address_family} rd {rd} detail'
'''
schema = {
'instance':
{'default':
{'vrf':
{Any():
{'address_family':
{Any():
{Optional('route_distinguisher'): str,
Optional('default_vrf'): str,
Optional('prefixes'):
{Any():
{Optional('paths'): str,
Optional('available_path'): str,
Optional('best_path'): str,
Optional('table_version'): str,
Optional('index'):
{Any():
{Optional('next_hop'): str,
Optional('next_hop_igp_metric'): str,
Optional('gateway'): str,
Optional('route_info'): str,
Optional('next_hop_via'): str,
Optional('update_group'): Any(),
Optional('status_codes'): str,
Optional('origin_codes'): str,
Optional('metric'): int,
Optional('inaccessible'): bool,
Optional('localpref'): int,
| |
#!/usr/bin/env python
## Copyright 2002-2010 by PyMMLib Development Group (see AUTHORS file)
## This code is part of the PyMMLib distribution and governed by
## its license. Please see the LICENSE file that should have been
## included as part of this package.
from __future__ import generators
import sys
import getopt
import copy
from mmLib.mmCIF import *
class mmCIFValidator(object):
    """Validates mmCIF data against one or more loaded mmCIF dictionaries.

    Dictionaries are consulted in the order they were loaded; per-tag
    lookups are memoized in ``cif_save_cache``.
    """

    def __init__(self):
        self.dict_list = []         # loaded mmCIFDictionary objects, in load order
        self.cif_save_cache = {}    # tag -> mmCIFSave memo for lookup_cif_save()
        self.parent_tag_cache = {}  # reserved cache for parent-tag lookups -- TODO confirm intended use

    def load_dictionary(self, path):
        """Loads a mmCIF dictionary into the manager."""
        cif_dict = mmCIFDictionary()
        cif_dict.path = path
        cif_dict.load_file(path)
        self.dict_list.append(cif_dict)

    def iter_cif_saves(self):
        """Iterates all mmCIFSave objects in all dictionaries."""
        for cif_dict in self.dict_list:
            for cif_save in cif_dict:
                yield cif_save

    def split_tag(self, tag):
        """Splits a '_table.column' tag into a lowercased (table, column) pair."""
        table_name, column = tag[1:].split(".")
        return table_name.lower(), column.lower()

    def join_tag(self, table_name, column):
        """Joins a table name and a column name back into a '_table.column' tag."""
        return "_%s.%s" % (table_name, column)

    def lookup_cif_save(self, tag):
        """Returns the first save block found in the list of dictionaries,
        or None when no dictionary defines the tag. Results are cached.
        """
        try:
            return self.cif_save_cache[tag]
        except KeyError:
            pass
        for cif_dict in self.dict_list:
            try:
                cif_save = cif_dict[tag]
            except KeyError:
                continue
            self.cif_save_cache[tag] = cif_save
            return cif_save
        return None

    def iter_column_saves(self, table_name):
        """Iterates the mmCIFSave objects of all subsections (columns) of the
        given section (table).
        """
        for cif_save in self.iter_cif_saves():
            try:
                tag = cif_save["item"]["name"]
            except KeyError:
                continue
            table_namex, columnx = self.split_tag(tag)
            if table_name == table_namex:
                yield cif_save

    def iter_mandatory_columns(self, table_name):
        """Iterates the mandatory column (subsection) names of a table."""
        for cif_save in self.iter_column_saves(table_name):
            if cif_save["item"]["mandatory_code"] == "yes":
                tag = cif_save["item"]["name"]
                table_namex, columnx = self.split_tag(tag)
                yield columnx

    def lookup_parent(self, tag):
        """Finds the ultimate parent tag of the given tag by following
        item_linked child->parent links recursively. Returns the tag
        itself when it has no parent.
        """
        for cif_save in self.iter_cif_saves():
            try:
                item_linked_table = cif_save["item_linked"]
            except KeyError:
                continue
            for cif_row in item_linked_table:
                if cif_row["child_name"] == tag:
                    return self.lookup_parent(cif_row["parent_name"])
        return tag

    def lookup_table_primary_tag(self, table_name):
        """Returns the name of the primary key column for a table, or returns
        None if there is no primary key column.
        """
        cif_save = self.lookup_cif_save(table_name)
        if cif_save is None:
            return None
        try:
            return cif_save["category_key"]["name"]
        except KeyError:
            return None

    def lookup_children(self, tag):
        """Returns a list of the tag's children (possibly empty)."""
        child_tag_list = []
        cif_save = self.lookup_cif_save(tag)
        if cif_save is not None:
            try:
                item_linked_table = cif_save["item_linked"]
            except KeyError:
                pass
            else:
                for cif_row in item_linked_table:
                    child_tag_list.append(cif_row["child_name"])
        return child_tag_list

    def is_root_tag(self, tag):
        """Returns True if the tag has linked children and no parent tag."""
        children = self.lookup_children(tag)
        if len(children) == 0:
            return False
        return self.lookup_parent(tag) == tag
class Any(object):
    """Wildcard sentinel: compares equal to every other object."""

    def __eq__(self, other):
        # Intentionally unconditional -- any comparison succeeds.
        return True
class mmCIFMerge(object):
    def __init__(self, name, validator):
        """Creates an empty merge target data block named *name*.

        *validator* is a mmCIFValidator used for all tag-linkage lookups.
        """
        self.cif_data = mmCIFData(name)  # the merged output data block
        self.validator = validator       # dictionary-backed linkage lookups
        self.merge_accel_dict = {}       # merge acceleration cache -- populated elsewhere, TODO confirm
        self.merge_accel_list = []       # merge acceleration cache -- populated elsewhere, TODO confirm
def log(self, text):
"""Log what is happening.
"""
sys.stderr.write("[mmCIFMerge] %s\n" % (text))
def save_file(self, fil):
cif_file = mmCIFFile()
cif_file.append(copy.deepcopy(self.cif_data))
cif_file.save_file(fil)
## <general methods>: Utility calls can be made with arguments with
## any CIF elements
def get_column_values(self, cif_table, column):
"""Return a list of all the values found in the table under the
given column.
"""
value_list = []
for cif_row in cif_table:
try:
value_list.append(cif_row[column])
except KeyError:
pass
return value_list
def sort_columns(self, cif_table):
"""Sort columns so they look nice ;)
"""
if "id" in cif_table.columns:
cif_table.columns.remove("id")
cif_table.columns.insert(0, "id")
def remove_blank_values(self, cif_data):
"""Removes any blank [.?] from the cif_data block.
"""
for cif_table in cif_data:
for cif_row in cif_table:
for (column, value) in cif_row.items():
if value == "." or value == "?" or value == "":
del cif_row[column]
    def add_parent_values(self, cif_data):
        """Checks all linked values in the cif_data block for validity by
        checking all child values against parent values and ensuring the
        parent values exist. If the parent values do not exist, they are added.
        """
        for cif_table in cif_data:
            for column in cif_table.columns:
                tag = self.validator.join_tag(cif_table.name, column)
                parent_tag = self.validator.lookup_parent(tag)
                if parent_tag == tag:
                    continue
                ## if we get here, then this table/column has a parent
                (par_tbl, par_col) = self.validator.split_tag(parent_tag)
                try:
                    parent_values = self.get_column_values(
                        cif_data[par_tbl], par_col)
                except KeyError:
                    ## parent table missing entirely -- treat as no values
                    parent_values = []
                child_values = self.get_column_values(cif_table, column)
                ## fill parent_add_list with values not found in the
                ## parent table (deduplicated, order preserved)
                parent_add_list = []
                for child_value in child_values:
                    if child_value in parent_values:
                        continue
                    if child_value in parent_add_list:
                        continue
                    parent_add_list.append(child_value)
                ## add any unaccounted for parent values; the parent table
                ## is created on demand the first time a value is added
                for value in parent_add_list:
                    try:
                        parent_cif_table = cif_data[par_tbl]
                    except KeyError:
                        parent_cif_table = mmCIFTable(par_tbl)
                        cif_data.append(parent_cif_table)
                    if par_col not in parent_cif_table.columns:
                        parent_cif_table.columns.append(par_col)
                    cif_row = mmCIFRow()
                    parent_cif_table.append(cif_row)
                    cif_row[par_col] = value
                    self.log("adding parent %s=%s for %s=%s" % (
                        parent_tag, value, tag, value))
def change_root_value(
self, cif_data, table_name, column, old_value, new_value):
"""Changes the value of the table_name.column from old value to
new_value, and propagates that change throughout all linked items
in the cif_data. If the new_value matches a row in the root table,
then the row containing old_value is removed instead of having its
value changed.
"""
try:
cif_table = cif_data[table_name]
except KeyError:
## what? no table? make one
cif_table = mmCIFTable(table_name, [column])
cif_data.append(cif_table)
cif_row = mmCIFRow()
cif_table.append(cif_row)
cif_row[column] = new_value
else:
old_cif_row = None
new_cif_row = None
## check for a row in the table matching the new value
for cif_row in cif_table:
if cif_row.get(column) == new_value:
new_cif_row = cif_row
elif cif_row.get(column) == old_value:
old_cif_row = cif_row
## if there was a row in the table already matching the new
## value, then use it and remove the old row
if old_cif_row == None and new_cif_row == None:
cif_row = mmCIFRow()
cif_table.append(cif_row)
cif_row[column] = new_value
elif old_cif_row != None and new_cif_row == None:
old_cif_row[column] = new_value
elif old_cif_row == None and new_cif_row != None:
cif_table.remove(old_cif_row)
elif old_cif_row != None and new_cif_row != None:
cif_table.remove(old_cif_row)
## set the child values
num_changed = 0
root_tag = self.validator.join_tag(table_name, column)
child_tags = self.validator.lookup_children(root_tag)
for tag in child_tags:
table_name, column = self.validator.split_tag(tag)
try:
cif_table = cif_data[table_name]
except KeyError:
continue
for cif_row in cif_table:
try:
value = cif_row[column]
except KeyError:
pass
else:
if value == old_value:
num_changed += 1
cif_row[column] = new_value
self.log("changed value %s=%s->%s" % (
tag, old_value, new_value))
    def change_root_value_uber_alles(
        self, cif_data, table_name, column, old_value, new_value):
        """For the given root table/column, this function keeps only one
        row (the one matching old_value), changes the value to new_value,
        and sets every possible linked table/row in cif_data with new_value.
        """
        ## STEP 1:
        ## first the root table: ensure it exists and contains exactly one
        ## row whose *column* holds new_value
        try:
            cif_table = cif_data[table_name]
        except KeyError:
            ## table missing: create it with a single row holding new_value
            cif_table = mmCIFTable(table_name, [column])
            cif_data.append(cif_table)
            cif_row = mmCIFRow()
            cif_table.append(cif_row)
            cif_row[column] = new_value
        else:
            ## find the cif_row with the old value, and the cif_row with
            ## the new value if they exist
            old_cif_row = None
            new_cif_row = None
            for cif_row in cif_table:
                if cif_row.get(column) == old_value:
                    old_cif_row = cif_row
                if cif_row.get(column) == new_value:
                    new_cif_row = cif_row
            keep_cif_row = None
            if new_cif_row != None:
                ## a row already holds new_value -- keep it as-is
                keep_cif_row = new_cif_row
            elif old_cif_row != None:
                ## rename the old row in place and keep it
                old_cif_row[column] = new_value
                keep_cif_row = old_cif_row
            else:
                ## neither value present: add a fresh row with new_value
                if column not in cif_table.columns:
                    cif_table.columns.append(column)
                keep_cif_row = cif_row = mmCIFRow()
                cif_table.append(cif_row)
                cif_row[column] = new_value
            ## remove all the rows except for the one matching the new_value
            remove_list = []
            for cif_row in cif_table:
                if cif_row != keep_cif_row:
                    remove_list.append(cif_row)
            for cif_row in remove_list:
                cif_table.remove(cif_row)
        ## STEP 2:
        ## Now get their little children too! If the child tables exist
        ## in the cif_data at all, then set the appropriate column of
        ## every row to new_value.
        root_tag = self.validator.join_tag(table_name, column)
        child_tags = self.validator.lookup_children(root_tag)
        for child_tag in child_tags:
            (chld_tbl, chld_col) = self.validator.split_tag(child_tag)
            try:
                cif_table = cif_data[chld_tbl]
            except KeyError:
                continue
            if chld_col not in cif_table.columns:
                cif_table.columns.append(chld_col)
            for cif_row in cif_table:
                cif_row[chld_col] = new_value
## </general methods>
## <merge utility methods>: These are part of the merge process and
## work on self.cif_data, and the parts
## of other CIF files being merged.
def resolve_cif_data_conflicts(self, cif_data):
"""Compares primary column values of the cif_data with values
in the merged data and changes them to new, non conflicting
values. This is done before the cif_data can be merged.
"""
for cif_table in cif_data:
tag = self.validator.lookup_table_primary_tag(cif_table.name)
if tag == None:
continue
table_name, column = self.validator.split_tag(tag)
## we are only interested in primary tags which are also
## root tags
if self.validator.is_root_tag(tag) == False:
continue
| |
import requests, re
from typing import List, Optional, Tuple, Literal
#----- Some useful data -----#
# Captures the game-version string out of a page's "version" div.
regex_version = r'<div class=\"version\">[^<]+<br/>\s*<[^>]+>\s*([^<\n\r]+)\s*</a>'
# HTTP timeout, in seconds, for every request issued by this module.
timeout = 5
#----- Errors management -----#
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class MissingLibError(Error):
    """Raised when a required library is missing."""
    def __init__(self, message):
        # Forward to Exception so str(exc) and tracebacks carry the message
        # (the original stored it only on .message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
class ItemNotFoundError(Error):
    """Raised when an item cannot be found"""
    def __init__(self, message):
        # Forward to Exception so str(exc) and tracebacks carry the message
        # (the original stored it only on .message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
class WrongDataError(Error):
    """Raised when there is an error during parsing data"""
    def __init__(self, message):
        # Forward to Exception so str(exc) and tracebacks carry the message
        # (the original stored it only on .message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
#----- Classes -----#
class Entity():
    """This class represents an entity, and all information about it.

    Here is a list of all variables accessible after creation:

    .. hlist::
        :columns: 2

        * name
        * entity_ids
        * entity_type
        * health
        * attack
        * xp
        * biomes
        * sizes
        * version
        * image
        * url

    .. tip:: Details on each of the information are given in comments in the source code
    """

    def __init__(self, name, entity_ids, entity_type, health, attack, xp, biomes, sizes, version, image, url=None):
        self.name: str = name                    # Name of the entity
        self.entity_ids: List[str] = entity_ids  # Text ids
        self.entity_type: str = entity_type      # Type of the entity
        self.health: int = health                # Health points
        self.attack: Optional[float] = attack    # Attack points
        self.xp: int = xp                        # XP dropped
        self.biomes: List[str] = biomes          # Favorite biomes
        # List[float] (not list[float]): attribute annotations are evaluated
        # at runtime, and the subscripted builtin requires Python >= 3.9.
        self.sizes: List[float] = sizes          # Entity 3D sizes (width, length, height)
        self.version: str = version              # Game version when adding
        self.image: Optional[str] = image        # Image url
        self.url: Optional[str] = url            # Url of the entity page (may be None)
class Item():
    """This class represents an item or a block. Some information can be empty
    depending on the type of item (weapon, block...).

    Here is a list of all variables accessible after creation:

    .. hlist::
        :columns: 2

        * name
        * item_ids
        * stack_size
        * creative_tab
        * damages
        * durability
        * tnt_resistance
        * tool
        * version
        * mobs
        * image
        * url

    .. tip:: Details on each of the information are given in comments in the source code
    """

    def __init__(self, name, item_ids, stack_size, tab, damages, durability, tnt_resistance, tool, version, mobs, image, url=None):
        self.name: str = name                                # Name of the item
        self.item_ids: List[str] = item_ids                  # Text ids
        self.stack_size: Optional[int] = stack_size          # Size of a stack
        self.creative_tab: Optional[str] = tab               # Tab in creative gamemode
        self.damages: Optional[float] = damages              # Weapon damage
        self.durability: Optional[int] = durability          # Durability
        self.tnt_resistance: Optional[int] = tnt_resistance  # Resistance to TNT explosion
        self.tool: str = tool                                # Tool able to destroy it
        self.version: str = version                          # Game version when adding
        self.mobs: List[str] = mobs                          # List of mobs that can drop this item
        self.image: Optional[str] = image                    # Url of an image
        self.url: Optional[str] = url                        # Url of the item page (may be None)
class Command():
    """This class represents a command (sometimes also called *cheat*).

    Information accessible after creation:

    .. hlist::
        :columns: 1

        * name
        * syntax
        * examples
        * version
        * url

    .. tip:: Details on each of the information are given in comments in the source code
    """

    def __init__(self, name, syntax, examples, version, url=None):
        self.name: str = name                            # Name of the command
        # List[Tuple[...]] (not list[...]): attribute annotations are
        # evaluated at runtime; the subscripted builtin needs Python >= 3.9.
        self.syntax: List[str] = syntax                  # List of parameters, sorted in order of use
        self.examples: List[Tuple[str, str]] = examples  # (syntax, explanation) example pairs
        self.version: str = version                      # Game version when adding
        self.url: Optional[str] = url                    # Url of the command page (may be None)
class Advancement():
    """This class represents an advancement, the event that replaces
    achievements since Minecraft Java Edition 1.12.

    Information accessible after creating the object:

    .. hlist::
        :columns: 1

        * name
        * adv_id
        * adv_type
        * description
        * parent
        * children
        * version
        * image
        * url

    .. tip:: Details on each of the information are given in comments in the source code
    """

    def __init__(self, name, adv_id, adv_type, description, parent, children, version, image, url=None):
        self.name: str = name                                        # Name of the advancement
        self.adv_id: str = adv_id                                    # Text identifier
        self.adv_type: Literal["Progrès", "Objectif (Goal)"] = adv_type  # Type of the advancement (Progrès/Objectif)
        self.description: str = description                          # Description of the advancement (fr)
        self.parent: Optional[str] = parent                          # Previous advancement in the Tree structure
        self.children: List[str] = children                          # List of next advancement(s) in the Tree structure
        self.version: str = version                                  # Game version when adding
        self.image: Optional[str] = image                            # Logo of the advancement
        self.url: Optional[str] = url                                # Url of the advancement page (may be None)
class SearchType:
    """String identifiers (French site slugs) accepted by the search functions."""
    ENTITY = "entité"
    BLOCK = "bloc"
    ITEM = "item"
    POTION = "potion"  # NOT IMPLEMENTED
    ENCHANT = "enchant"  # NOT IMPLEMENTED
    ADVANCEMENT = "progrès"
    EFFECT = "effet"  # NOT IMPLEMENTED
    SUCCESS = "succès"  # NOT IMPLEMENTED
    COMMAND = "commande"

    @classmethod
    def as_list(cls) -> Tuple[str, ...]:
        """Returns every known search-type identifier, in declaration order."""
        members = (cls.ENTITY, cls.BLOCK, cls.ITEM, cls.POTION, cls.ENCHANT,
                   cls.ADVANCEMENT, cls.EFFECT, cls.SUCCESS, cls.COMMAND)
        return members
#----- Useful functions -----#
def main(name: str, item_type: str):
    """The main function shortens the information acquisition method. It allows to \
generate an object from a simple word and a type, without having to execute all \
the sub-functions. This is probably the function you will use most often for \
standard library use.

    Parameters
    ----------
    name: :class:`str`
        The name of the item to search for
    item_type: :class:`str`
        The type of item. Use :class:`~SearchType` to make sure to get the right identifier

    Return
    ------
    diverses
        Object of type Entity(), Item() or other, depending on the Type given in parameters

    Raises
    ------
    :class:`~frmc_lib.ItemNotFoundError`
        The type of the given item is not available yet, or the given item can't be found
    """
    # Validate the type *before* hitting the network; the original performed
    # the search first and only then rejected unsupported types.
    fetchers = {
        SearchType.BLOCK: search_item,
        SearchType.ITEM: search_item,
        SearchType.ENTITY: search_entity,
        SearchType.COMMAND: search_cmd,
        SearchType.ADVANCEMENT: search_adv,
    }
    try:
        fetcher = fetchers[item_type.lower()]
    except KeyError:
        raise ItemNotFoundError("This item type is not available: {}".format(item_type))
    data = search(name)
    urls = search_links(data, item_type)
    if len(urls) == 0:
        raise ItemNotFoundError("This item cannot be found: {} (Type: {})".format(name, item_type))
    return fetcher(url=urls[0])
def url_to_data(url: str):
    """This function allows you to retrieve the source code of a web page, from its \
url address. You just have to give the url as parameter to receive a string \
containing the html code.

    Parameters
    ----------
    url: :class:`str`
        The url of the page

    Return
    ------
    :class:`str`
        The html string of the page

    Raises
    ------
    :class:`TypeError`
        The url must be a string.
    """
    if not isinstance(url, str):
        raise TypeError("url must be a string")
    try:
        # Fetch once and reuse the response for both decode paths; the
        # original re-downloaded the whole page on a UnicodeDecodeError.
        response = requests.get(url, timeout=timeout)
        try:
            data = response.text
        except UnicodeDecodeError:
            data = response.content.decode("utf-8")
    except Exception:
        print("Error while converting the url \"{}\" to data:".format(url))
        raise
    return data
#----- Searching functions -----#
def search(item: str):
    """This function returns the source code of the search page, initialized with \
a string containing the query.

    Parameters
    ----------
    item: :class:`str`
        The name of the item to search.

    Return
    ------
    :class:`str`
        The html source of the search page

    Raises
    ------
    :class:`TypeError`
        The given item must be a string
    """
    if not isinstance(item, str):
        raise TypeError("item must be a string")
    # The site expects '+' between words in the query string.
    query = item.replace(" ", "+")
    return url_to_data("http://fr-minecraft.net/recherche.php?search=" + query)
def search_links(html: str, result_type: str=None, limit: int=1):
"""This function allows you to find a certain number of links to item records from the \
html code of the search page. For example if you want to get the url addresses \
of all the swords, you have to give in arguments the code of the page \
https://fr-minecraft.net/recherche.php?search=sword and "Item" in type.
You can get several links by changing the value of the limit argument (1 by default)
Parameters
----------
html: :class:`str`
The html string of the search page
result_type: :class:`str`
The type of item to find. Use the :class:`~SearchType` to make sure to use the right one. result_type=None admits all types
limit: :class:`int`
The maximum number of links to return
Return
------
:class:`list`
List of matching links
Raises
------
:class:`TypeError`
One of these arguments | |
< len_y:
# インクリメントしていき全部を舐めていく(effective_lengthずつ飛ばしているけど良い??)
y__ = y[i:i+effective_length]
if effective_length > len(y__):
break
else:
y_.append(y__)
i = i + int(effective_length)
y = np.stack(y_) # (effective_length, 2N)
label = np.zeros(24, dtype='f')
# y: clip nums, seq -> clip_nums, width, height
return {
"wav": torch.tensor(y, dtype=torch.float),
"target": torch.tensor(label, dtype=torch.float),
"id": recording_id,
}
################################################
# Model #
################################################
def init_layer(layer):
    """Applies Xavier-uniform init to *layer*'s weight and zeroes its bias (if any)."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, "bias", None)
    if bias is not None:
        bias.data.fill_(0.)
class AudioClassifier(nn.Module):
    """timm-backed CNN classifier over band-cropped log-mel spectrograms.

    Each sample is cropped to a mel-frequency band given by
    f_min_mels/f_max_mels, resized to (IMAGE_WIDTH, IMAGE_HEIGHT) and fed
    through the backbone; a single linear head produces n_out logits.
    """
    def __init__(self, model_name, n_out):
        super(AudioClassifier, self).__init__()
        # Spec augmenter (time/frequency masking, applied only in training)
        self.spec_augmenter = SpecAugmentation(time_drop_width=80, time_stripes_num=2,
                                               freq_drop_width=16, freq_stripes_num=2)
        self.net = timm.create_model(model_name, pretrained=True, in_chans=1)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout1 = nn.Dropout(0.3)
        self.dropout2 = nn.Dropout(0.3)  # NOTE(review): defined but not used in forward() -- confirm intent
        n_features = self.net.classifier.in_features
        self.net_classifier = nn.Linear(n_features, n_out)
        self.init_weight()
        # NOTE: kornia's random crop expects (h, w) order, hence the
        # transpose before/after self.transform in forward().
        self.transform = nn.Sequential(K.RandomHorizontalFlip(p=0.1),
                              # K.GaussianBlur(7, p=0.5),
                              # K.RandomCrop((round(IMAGE_HEIGHT*0.7), round(IMAGE_WIDTH*0.7)),p=0.3)
                              )

    def init_weight(self):
        """Re-initializes the classification head (Xavier weights, zero bias)."""
        init_layer(self.net_classifier)

    def forward(self, x, f_min_mels, f_max_mels, train=True, test=False): # x: (bs, 1, w, h)
        """Crops each spectrogram to its frequency band, resizes, and classifies.

        In test mode every sample is cropped once per (f_min, f_max) pair,
        multiplying the effective batch size; in train mode each sample uses
        its own single band, with optional kornia and SpecAugment transforms.
        """
        # Cut each spectrogram down to the band [f_min_mel, f_max_mel].
        bs, ch, w, h = x.shape
        x = x.reshape(bs*w, -1)
        bsw = bs*w
        spects = []
        fi = 0
        if test:
            for ii, i in enumerate(range(bsw)[::w]):
                spect = x[i:i+w] # torch (w, h)
                for f_min, f_max in zip(f_min_mels, f_max_mels):
                    _spect = cut_spect(spect.transpose(0, 1), f_min, f_max).transpose(0, 1) # out:torch (w, h)
                    # resize back to the fixed model input size
                    _spect = torch.unsqueeze(_spect, 0)
                    _spect = torch.unsqueeze(_spect, 0) # torch(1,1,w,h)
                    _spect = F.interpolate(_spect, (IMAGE_WIDTH, IMAGE_HEIGHT),
                                           mode='bilinear',
                                           align_corners=False) # out: torch (1, 1, w, h)
                    _spect = torch.squeeze(_spect, 0) # out: torch (1, w, h)
                    spects.append(_spect)
            x = torch.stack(spects) # torch (bs, 1, w, h) bs=24*bs*10
        else:
            for ii, i in enumerate(range(bsw)[::w]):
                spect = x[i:i+w] # torch (w, h)
                f_min = f_min_mels[fi]
                f_max = f_max_mels[fi]
                spect = cut_spect(spect.transpose(0, 1), f_min, f_max).transpose(0, 1) # out:torch (w, h)
                # resize back to the fixed model input size
                spect = torch.unsqueeze(spect, 0)
                spect = torch.unsqueeze(spect, 0) # torch(1,1,w,h)
                spect = F.interpolate(spect, (IMAGE_WIDTH, IMAGE_HEIGHT),
                                      mode='bilinear',
                                      align_corners=False) # out: torch (1, 1, w, h)
                if train:
                    spect = self.transform(spect.transpose(2, 3)) # out: torch(1,1,h,w)
                    spect = spect.transpose(2, 3) # out: torch(1,1,w,h)
                spect = torch.squeeze(spect, 0) # torch (1, w, h)
                spects.append(spect)
                fi += 1
            x = torch.stack(spects) # torch (bs, 1, w, h)
        x = do_normalize(x)
        if train:
            x = self.spec_augmenter(x)
        # x = x.expand(x.shape[0], 3, x.shape[2], x.shape[3]) # replicate across channels (disabled)
        # Output shape (batch size, channels, time, frequency)
        x = self.net.forward_features(x)
        x = self.avg_pool(x).flatten(1)
        x = self.dropout1(x)
        x = self.net_classifier(x)
        return x
################################################
# Loss #
################################################
def f1_loss(y_true, y_pred, is_training=False, epsilon=1e-7) -> torch.Tensor:
    '''
    Calculate F1 score. Works with torch tensors (CPU or GPU) and with
    numpy arrays.

    y_pred is thresholded at 0.5; is_training is unused and kept only for
    signature compatibility; epsilon guards against division by zero.

    Returns
    -------
    torch.Tensor
        `ndim` == 0. 0 <= val <= 1

    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    '''
    # Cast the thresholded mask to float: `1 - y_pred` on a bool tensor is a
    # RuntimeError in modern PyTorch (bool subtraction is unsupported).
    y_pred = (y_pred > 0.5) * 1.0
    tp = (y_true * y_pred).sum()
    fp = ((1 - y_true) * y_pred).sum()
    fn = (y_true * (1 - y_pred)).sum()
    precision = tp / (tp + fp + epsilon)
    recall = tp / (tp + fn + epsilon)
    f1 = 2 * (precision*recall) / (precision + recall + epsilon)
    return f1
# https://www.kaggle.com/c/rfcx-species-audio-detection/discussion/213075
class BCEFocalLoss(nn.Module):
    """Binary focal loss on logits, with separate weighting of the positive
    term (alpha) and down-weighting of easy examples (gamma).
    """
    def __init__(self, alpha=0.25, gamma=2.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, preds, targets):
        """Returns the mean focal loss of *preds* (logits) against *targets*."""
        raw_bce = nn.BCEWithLogitsLoss(reduction='none')(preds, targets)
        probas = torch.sigmoid(preds)
        pos_term = targets * self.alpha * (1. - probas)**self.gamma * raw_bce
        neg_term = (1. - targets) * probas**self.gamma * raw_bce
        return (pos_term + neg_term).mean()
################################################
# Training helper functions #
################################################
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Clears all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Records *val* observed *n* times and refreshes the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class MetricMeter(object):
    """Accumulates targets and sigmoid predictions; computes LWLRAP and F1."""
    def __init__(self):
        self.reset()

    def reset(self):
        """Drops everything accumulated so far."""
        self.y_true = []
        self.y_pred = []

    def update(self, y_true, y_pred):
        """Appends one batch; predictions are stored as sigmoid probabilities."""
        self.y_true.extend(y_true.cpu().detach().numpy().tolist())
        self.y_pred.extend(torch.sigmoid(y_pred).cpu().detach().numpy().tolist())

    @property
    def avg(self):
        """Returns {'lwlrap', 'f1score'} over everything seen since reset()."""
        truths = np.array(self.y_true)
        preds = np.array(self.y_pred)
        self.f1score = f1_loss(truths, preds)
        score_class, weight = lwlrap(truths, preds)
        self.score = (score_class * weight).sum()
        return {
            "lwlrap": self.score,
            "f1score": self.f1score,
        }
def get_mixup_indices(bs, f_min_mels, f_max_mels):
    """For each sample, picks a mixup partner whose frequency band fits
    inside its own.

    Returns an int array ``indices`` of length *bs* where ``indices[i]`` is
    the first ``j != i`` with ``f_min_mels[j] >= f_min_mels[i]`` and
    ``f_max_mels[j] <= f_max_mels[i]``, or ``i`` itself when no such
    partner exists.

    (The original built an intermediate bs x bs matrix and carried a dead
    ``else`` branch -- ``i in range(bs)`` was always true; this version
    computes the same result directly.)
    """
    indices = np.arange(bs)
    for img1_idx in range(bs):
        for img2_idx in range(bs):
            if img1_idx == img2_idx:
                continue
            fits = (f_min_mels[img2_idx] >= f_min_mels[img1_idx]) & (f_max_mels[img2_idx] <= f_max_mels[img1_idx])
            if fits:
                indices[img1_idx] = img2_idx
                break  # stop at the first eligible partner for img1
    return indices
def mixup(data, targets, f_min_mels, f_max_mels, alpha=1.0):
    """Mixes each sample with a frequency-compatible partner at a fixed
    50/50 ratio; returns (mixed data, mixed targets).

    *alpha* is currently unused: the Beta(alpha, alpha) draw is disabled in
    favor of a constant mixing coefficient.
    """
    batch_size = data.size(0)
    partner = get_mixup_indices(batch_size, f_min_mels, f_max_mels)
    partner_data = data[partner]
    partner_targets = targets[partner]
    # lam = np.random.beta(alpha, alpha)
    lam = 0.5
    mixed_data = data * lam + partner_data * (1 - lam)
    mixed_targets = targets * lam + partner_targets * (1 - lam)
    return mixed_data, mixed_targets
def train_epoch(model, spectrogram_extractor, logmel_extractor, loader,
                criterion, optimizer, scheduler, epoch, device, p_mixup,
                normalize=True, resize=True, spec_aug=True):
    """Runs one training epoch and returns (metric dict, mean loss).

    Waveforms are turned into log-mel spectrograms on the fly; with
    probability *p_mixup* a batch is mixed with frequency-compatible
    partners before the forward pass. Supports gradient accumulation via
    CFG.gradient_accumulation_steps.
    """
    losses = AverageMeter()
    scores = MetricMeter()
    model.train()
    t = tqdm(loader)
    for i, sample in enumerate(t):
        x = sample['wav'].to(device)  # (bs, seq)
        target = sample['target'].to(device)
        f_min_mels = sample["f_min_mel"]
        f_max_mels = sample["f_max_mel"]
        x = spectrogram_extractor(x)  # (batch_size, 1, time_steps, freq_bins)
        x = logmel_extractor(x)
        if np.random.rand(1) < p_mixup:
            # mixup branch: loss is computed against the mixed targets
            mix_x, mix_target = mixup(x, target, f_min_mels, f_max_mels)
            output = model(mix_x, f_min_mels, f_max_mels, train=True)
            loss = criterion(output, mix_target)
        else:
            output = model(x, f_min_mels, f_max_mels, train=True)
            loss = criterion(output, target)
        if CFG.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradients average out to one batch.
            loss = loss / CFG.gradient_accumulation_steps
        # BUG FIX: backward() previously sat in an `else:` branch, so no
        # gradients were ever computed when gradient_accumulation_steps > 1.
        # It must run on every step.
        loss.backward()
        if (i + 1) % CFG.gradient_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()
        if CFG.step_scheduler:
            scheduler.step()
        bs = x.size(0)
        scores.update(target, output)
        losses.update(loss.item(), bs)
        t.set_description(f"Train E:{epoch} - Loss{losses.avg:0.4f}")
    t.close()
    return scores.avg, losses.avg
def valid_epoch(model, spectrogram_extractor, logmel_extractor,
                loader, criterion, epoch, device):
    """Runs one validation pass (no gradients) and returns
    (metric dict, mean loss).
    """
    loss_meter = AverageMeter()
    metric_meter = MetricMeter()
    model.eval()
    with torch.no_grad():
        progress = tqdm(loader)
        for step, sample in enumerate(progress):
            wav = sample['wav'].to(device)  # (bs, seq)
            target = sample['target'].to(device)
            f_min_mels = sample["f_min_mel"]
            f_max_mels = sample["f_max_mel"]
            spec = spectrogram_extractor(wav)  # (batch_size, 1, time_steps, freq_bins)
            spec = logmel_extractor(spec)
            batch = spec.size(0)
            output = model(spec, f_min_mels, f_max_mels, train=False)
            loss = criterion(output, target)
            metric_meter.update(target, output)
            loss_meter.update(loss.item(), batch)
            progress.set_description(f"Valid E:{epoch} - Loss:{loss_meter.avg:0.4f}")
        progress.close()
    return metric_meter.avg, loss_meter.avg
def test_epoch(model, spectrogram_extractor, logmel_extractor, loader,
               f_min_mels, f_max_mels, device, normalize=True, resize=True):
    """Runs band-expanded inference over *loader* and returns
    (per-recording max-pooled sigmoid predictions, recording ids).
    """
    model.eval()
    pred_list = []
    id_list = []
    with torch.no_grad():
        progress = tqdm(loader)
        for sample in progress:
            wav = sample["wav"].to(device)
            bs, c, seq = wav.shape
            wav = wav.reshape(bs * c, seq)
            spec = spectrogram_extractor(wav)
            spec = logmel_extractor(spec)
            rec_ids = sample["id"]  # renamed from `id` (shadowed the builtin)
            probs = torch.sigmoid(model(spec, f_min_mels, f_max_mels, train=False, test=True))
            probs = probs.reshape(bs, c * 24, -1)
            # collapse all clips/bands of a recording into one score per class
            probs, _ = torch.max(probs, dim=1)
            pred_list.extend(probs.cpu().detach().numpy().tolist())
            id_list.extend(rec_ids)
    return pred_list, id_list
def get_valid_all_clip_result(fold: int):
    """Runs full-clip inference with fold *fold*'s trained model over that
    fold's own validation split.

    Returns a DataFrame with recording_id, kfold and per-species score
    columns s0..s23.
    """
    # Load Data
    train_df = pd.read_csv(OUTPUT_DIR / "folds.csv")
    train_df = train_df[train_df["istp"] == 1].reset_index(drop=True)
    species_fmin_fmax = pd.read_csv(OUTPUT_DIR / "species_fmin_fmax.csv")
    f_min_mels = torch.tensor(species_fmin_fmax["f_min_mel"].values, dtype=torch.int)
    f_max_mels = torch.tensor(species_fmin_fmax["f_max_mel"].values, dtype=torch.int)
    # Load model weights saved for this fold
    model = AudioClassifier(CFG.model_param["encoder"], CFG.model_param["classes_num"])
    model.load_state_dict(torch.load(OUTPUT_DIR / f'fold-{fold}.bin'))
    model = model.to(device)
    # Get valid split of this fold
    valid_fold = train_df[train_df.kfold == fold].reset_index(drop=True)
    test_dataset = TestDataset(
        df=valid_fold,
        period=CFG.period,
        transforms=None,
        data_path="../input/train",
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=CFG.batch_size//32,
        shuffle=False,
        drop_last=False,
        num_workers=CFG.num_workers
    )
    # Fixed (non-trainable) STFT + log-mel front end
    window = 'hann'
    center = True
    pad_mode = 'reflect'
    ref = 1.0
    amin = 1e-10
    top_db = None
    spectrogram_extractor = Spectrogram(n_fft=WINDOW_SIZE, hop_length=HOP_SIZE,
                                        win_length=WINDOW_SIZE, window=window,
                                        center=center, pad_mode=pad_mode,
                                        freeze_parameters=True).to(device)
    logmel_extractor = LogmelFilterBank(sr=SR, n_fft=WINDOW_SIZE,
                                        n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
                                        ref=ref, amin=amin, top_db=top_db,
                                        freeze_parameters=True).to(device)
    test_pred, ids = test_epoch(model, spectrogram_extractor, logmel_extractor, test_loader,
                                f_min_mels, f_max_mels, device, resize=True)
    # Assemble the prediction frame: one row per validation recording
    test_pred_df = pd.DataFrame({
        "recording_id": valid_fold.recording_id.values
    })
    test_pred_df["kfold"] = fold
    for i in range(24):
        test_pred_df[f"s{i}"] = 0
    test_pred_df[[f's{i}' for i in range(24)]] = test_pred
    return test_pred_df
def inference(fold: int):
    """Runs test-set inference with fold *fold*'s trained model.

    Returns a DataFrame with recording_id, kfold and per-species score
    columns s0..s23, one row per submission recording.
    """
    # Load Data
    sub_df = pd.read_csv("../input/sample_submission.csv")
    species_fmin_fmax = pd.read_csv(OUTPUT_DIR / "species_fmin_fmax.csv")
    f_min_mels = torch.tensor(species_fmin_fmax["f_min_mel"].values, dtype=torch.int)
    f_max_mels = torch.tensor(species_fmin_fmax["f_max_mel"].values, dtype=torch.int)
    # Load model weights saved for this fold
    model = AudioClassifier(CFG.model_param["encoder"], CFG.model_param["classes_num"])
    model.load_state_dict(torch.load(OUTPUT_DIR / f'fold-{fold}.bin'))
    model = model.to(device)
    # Build the test loader over the submission recordings
    test_dataset = TestDataset(
        df=sub_df,
        period=CFG.period,
        transforms=None,
        data_path="../input/test",
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=CFG.batch_size//32,
        shuffle=False,
        drop_last=False,
        num_workers=CFG.num_workers
    )
    # Fixed (non-trainable) STFT + log-mel front end
    window = 'hann'
    center = True
    pad_mode = 'reflect'
    ref = 1.0
    amin = 1e-10
    top_db = None
    spectrogram_extractor = Spectrogram(n_fft=WINDOW_SIZE, hop_length=HOP_SIZE,
                                        win_length=WINDOW_SIZE, window=window,
                                        center=center, pad_mode=pad_mode,
                                        freeze_parameters=True).to(device)
    logmel_extractor = LogmelFilterBank(sr=SR, n_fft=WINDOW_SIZE,
                                        n_mels=N_MELS, fmin=FMIN, fmax=FMAX,
                                        ref=ref, amin=amin, top_db=top_db,
                                        freeze_parameters=True).to(device)
    test_pred, ids = test_epoch(model, spectrogram_extractor, logmel_extractor, test_loader,
                                f_min_mels, f_max_mels, device, resize=True)
    # Assemble the prediction frame: one row per submission recording
    test_pred_df = pd.DataFrame({
        "recording_id": sub_df.recording_id.values
    })
    test_pred_df["kfold"] = fold
    for i in range(24):
        test_pred_df[f"s{i}"] = 0
    test_pred_df[[f's{i}' for i in range(24)]] = test_pred
    return test_pred_df
################################################
# Training Loop #
################################################
def | |
<reponame>jyothish6190/sift-python
import datetime
import warnings
import json
import mock
import sift
import unittest
import sys
import requests.exceptions
if sys.version_info[0] < 3:
import urllib
else:
import urllib.parse as urllib
def valid_transaction_properties():
    """Returns a minimal valid $transaction event payload used by the tests."""
    return {
        '$buyer_user_id': '123456',
        '$seller_user_id': '654321',
        '$amount': 1253200,
        '$currency_code': 'USD',
        # Unix seconds. datetime.strftime('%s') is platform-specific (it is
        # not in the C89 set and fails on Windows); timestamp() is portable.
        '$time': int(datetime.datetime.now().timestamp()),
        '$transaction_id': 'my_transaction_id',
        '$billing_name': '<NAME>',
        '$billing_bin': '411111',
        '$billing_last4': '1111',
        '$billing_address1': '123 Main St.',
        '$billing_city': 'San Francisco',
        '$billing_region': 'CA',
        '$billing_country': 'US',
        '$billing_zip': '94131',
        '$user_email': '<EMAIL>'
    }
def valid_label_properties():
    """Return a sample payload for a label API call."""
    properties = {}
    properties['$abuse_type'] = 'content_abuse'
    properties['$is_bad'] = True
    properties['$description'] = 'Listed a fake item'
    properties['$source'] = 'Internal Review Queue'
    properties['$analyst'] = '<EMAIL>'
    return properties
def score_response_json():
    """Return a canned JSON body for mocking the score API response."""
    body = """{
    "status": 0,
    "error_message": "OK",
    "user_id": "12345",
    "score": 0.85,
    "latest_label": {
        "is_bad": true,
        "time": 1450201660000
    },
    "scores": {
        "content_abuse": {
            "score": 0.14
        },
        "payment_abuse": {
            "score": 0.97
        }
    },
    "latest_labels": {
        "promotion_abuse": {
            "is_bad": false,
            "time": 1457201099000
        },
        "payment_abuse": {
            "is_bad": true,
            "time": 1457212345000
        }
    }
}"""
    return body
# A sample response from the /{version}/users/{userId}/score API.
USER_SCORE_RESPONSE_JSON = """{
"status": 0,
"error_message": "OK",
"entity_type": "user",
"entity_id": "12345",
"scores": {
"content_abuse": {
"score": 0.14
},
"payment_abuse": {
"score": 0.97
}
},
"latest_decisions": {
"payment_abuse": {
"id": "user_looks_bad_payment_abuse",
"category": "block",
"source": "AUTOMATED_RULE",
"time": 1352201880,
"description": "Bad Fraudster"
}
},
"latest_labels": {
"promotion_abuse": {
"is_bad": false,
"time": 1457201099000
},
"payment_abuse": {
"is_bad": true,
"time": 1457212345000
}
}
}"""
def action_response_json():
    """Return a canned JSON body containing triggered actions plus scores."""
    body = """{
    "actions": [
        {
            "action": {
                "id": "freds_action"
            },
            "entity": {
                "id": "Fred"
            },
            "id": "ACTION1234567890:freds_action",
            "triggers": [
                {
                    "source": "synchronous_action",
                    "trigger": {
                        "id": "TRIGGER1234567890"
                    },
                    "type": "formula"
                }
            ]
        }
    ],
    "score": 0.85,
    "status": 0,
    "error_message": "OK",
    "user_id": "Fred",
    "scores": {
        "content_abuse": {
            "score": 0.14
        },
        "payment_abuse": {
            "score": 0.97
        }
    },
    "latest_labels": {
        "promotion_abuse": {
            "is_bad": false,
            "time": 1457201099000
        },
        "payment_abuse": {
            "is_bad": true,
            "time": 1457212345000
        }
    }
}"""
    return body
def response_with_data_header():
    """Return HTTP headers declaring a JSON body, for mock responses."""
    headers = {'content-type': 'application/json; charset=UTF-8'}
    return headers
class TestSiftPythonClient(unittest.TestCase):
def setUp(self):
self.test_key = 'a_fake_test_api_key'
self.account_id = 'ACCT'
self.sift_client = sift.Client(api_key=self.test_key, account_id=self.account_id)
def test_global_api_key(self):
# test for error if global key is undefined
self.assertRaises(TypeError, sift.Client)
sift.api_key = "a_test_global_api_key"
local_api_key = "a_test_local_api_key"
client1 = sift.Client()
client2 = sift.Client(local_api_key)
# test that global api key is assigned
assert(client1.api_key == sift.api_key)
# test that local api key is assigned
assert(client2.api_key == local_api_key)
client2 = sift.Client()
# test that client2 is assigned a new object with global api_key
assert(client2.api_key == sift.api_key)
def test_constructor_requires_valid_api_key(self):
self.assertRaises(TypeError, sift.Client, None)
self.assertRaises(ValueError, sift.Client, '')
def test_constructor_invalid_api_url(self):
self.assertRaises(TypeError, sift.Client, self.test_key, None)
self.assertRaises(ValueError, sift.Client, self.test_key, '')
def test_constructor_api_key(self):
client = sift.Client(self.test_key)
self.assertEqual(client.api_key, self.test_key)
def test_track_requires_valid_event(self):
self.assertRaises(TypeError, self.sift_client.track, None, {})
self.assertRaises(ValueError, self.sift_client.track, '', {})
self.assertRaises(TypeError, self.sift_client.track, 42, {})
def test_track_requires_properties(self):
event = 'custom_event'
self.assertRaises(TypeError, self.sift_client.track, event, None)
self.assertRaises(TypeError, self.sift_client.track, event, 42)
self.assertRaises(ValueError, self.sift_client.track, event, {})
def test_score_requires_user_id(self):
self.assertRaises(TypeError, self.sift_client.score, None)
self.assertRaises(ValueError, self.sift_client.score, '')
self.assertRaises(TypeError, self.sift_client.score, 42)
def test_event_ok(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(event, valid_transaction_properties())
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_event_with_timeout_param_ok(self):
event = '$transaction'
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = '{"status": 0, "error_message": "OK"}'
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event, valid_transaction_properties(), timeout=test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=test_timeout,
params={})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
def test_score_ok(self):
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345')
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_score_with_timeout_param_ok(self):
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = score_response_json()
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/score/12345',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['score'] == 0.85)
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
def test_get_user_score_ok(self):
"""Test the GET /{version}/users/{userId}/score API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345', test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_get_user_score_with_abuse_types_ok(self):
"""Test the GET /{version}/users/{userId}/score?abuse_types=... API, i.e. client.get_user_score()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_user_score('12345',
abuse_types=['payment_abuse', 'content_abuse'],
timeout=test_timeout)
mock_get.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_ok(self):
"""Test the POST /{version}/users/{userId}/score API, i.e. client.rescore_user()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.rescore_user('12345', test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_rescore_user_with_abuse_types_ok(self):
"""Test the POST /{version}/users/{userId}/score?abuse_types=... API, i.e. client.rescore_user()
"""
test_timeout = 5
mock_response = mock.Mock()
mock_response.content = USER_SCORE_RESPONSE_JSON
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.rescore_user('12345',
abuse_types=['payment_abuse', 'content_abuse'],
timeout=test_timeout)
mock_post.assert_called_with(
'https://api.siftscience.com/v205/users/12345/score',
params={'api_key': self.test_key, 'abuse_types': 'payment_abuse,content_abuse'},
headers=mock.ANY,
timeout=test_timeout)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_error_message == "OK")
assert(response.body['entity_id'] == '12345')
assert(response.body['scores']['content_abuse']['score'] == 0.14)
assert(response.body['scores']['payment_abuse']['score'] == 0.97)
assert('latest_decisions' in response.body)
def test_sync_score_ok(self):
event = '$transaction'
mock_response = mock.Mock()
mock_response.content = ('{"status": 0, "error_message": "OK", "score_response": %s}'
% score_response_json())
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.track(
event,
valid_transaction_properties(),
return_score=True,
abuse_types=['payment_abuse', 'content_abuse', 'legacy'])
mock_post.assert_called_with(
'https://api.siftscience.com/v205/events',
data=mock.ANY,
headers=mock.ANY,
timeout=mock.ANY,
params={'return_score': 'true', 'abuse_types': 'payment_abuse,content_abuse,legacy'})
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.api_status == 0)
assert(response.api_error_message == "OK")
assert(response.body['score_response']['score'] == 0.85)
assert(response.body['score_response']['scores']['content_abuse']['score'] == 0.14)
assert(response.body['score_response']['scores']['payment_abuse']['score'] == 0.97)
def test_get_decisions_fails(self):
with self.assertRaises(ValueError):
self.sift_client.get_decisions('usr')
def test_get_decisions(self):
mock_response = mock.Mock()
get_decisions_response_json = """
{
"data": [
{
"id": "block_user",
"name": "Block user",
"description": "user has a different billing and shipping addresses",
"entity_type": "user",
"abuse_type": "legacy",
"category": "block",
"webhook_url": "http://web.hook",
"created_at": "1468005577348",
"created_by": "<EMAIL>",
"updated_at": "1469229177756",
"updated_by": "<EMAIL>"
}
],
"has_more": "true",
"next_ref": "v3/accounts/accountId/decisions"
}
"""
mock_response.content = get_decisions_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_decisions(entity_type="user",
limit=10,
start_from=None,
abuse_types="legacy,payment_abuse",
timeout=3)
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
headers=mock.ANY,
auth=mock.ANY,
params={'entity_type': 'user', 'limit': 10, 'abuse_types': 'legacy,payment_abuse'},
timeout=3)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['data'][0]['id'] == 'block_user')
def test_get_decisions_entity_session(self):
mock_response = mock.Mock()
get_decisions_response_json = """
{
"data": [
{
"id": "block_session",
"name": "Block session",
"description": "session has problems",
"entity_type": "session",
"abuse_type": "legacy",
"category": "block",
"webhook_url": "http://web.hook",
"created_at": "1468005577348",
"created_by": "<EMAIL>",
"updated_at": "1469229177756",
"updated_by": "<EMAIL>"
}
],
"has_more": "true",
"next_ref": "v3/accounts/accountId/decisions"
}
"""
mock_response.content = get_decisions_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'get') as mock_get:
mock_get.return_value = mock_response
response = self.sift_client.get_decisions(entity_type="session",
limit=10,
start_from=None,
abuse_types="account_takeover",
timeout=3)
mock_get.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/decisions',
headers=mock.ANY,
auth=mock.ANY,
params={'entity_type': 'session', 'limit': 10, 'abuse_types': 'account_takeover'},
timeout=3)
self.assertIsInstance(response, sift.client.Response)
assert(response.is_ok())
assert(response.body['data'][0]['id'] == 'block_session')
def test_apply_decision_to_user_ok(self):
user_id = '54321'
mock_response = mock.Mock()
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'analyst': '<EMAIL>',
'description': 'called user and verified account',
'time': 1481569575
}
apply_decision_response_json = """
{
"entity": {
"id": "54321",
"type": "user"
},
"decision": {
"id": "user_looks_ok_legacy"
},
"time": "1481569575"
}
"""
mock_response.content = apply_decision_response_json
mock_response.json.return_value = json.loads(mock_response.content)
mock_response.status_code = 200
mock_response.headers = response_with_data_header()
with mock.patch.object(self.sift_client.session, 'post') as mock_post:
mock_post.return_value = mock_response
response = self.sift_client.apply_user_decision(user_id, apply_decision_request)
data = json.dumps(apply_decision_request)
mock_post.assert_called_with(
'https://api3.siftscience.com/v3/accounts/ACCT/users/%s/decisions' % user_id,
auth=mock.ANY, data=data, headers=mock.ANY, timeout=mock.ANY)
self.assertIsInstance(response, sift.client.Response)
assert(response.body['entity']['type'] == 'user')
assert(response.http_status_code == 200)
assert(response.is_ok())
def test_validate_no_user_id_string_fails(self):
apply_decision_request = {
'decision_id': 'user_looks_ok_legacy',
'source': 'MANUAL_REVIEW',
'analyst': '<EMAIL>',
'description': 'called user and verified account',
}
with self.assertRaises(TypeError):
self.sift_client._validate_apply_decision_request(apply_decision_request, 123)
def test_apply_decision_to_order_fails_with_no_order_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_order_decision("user_id", None, {})
def test_apply_decision_to_session_fails_with_no_session_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_session_decision("user_id", None, {})
def test_get_session_decisions_fails_with_no_session_id(self):
with self.assertRaises(TypeError):
self.sift_client.get_session_decisions("user_id", None)
def test_apply_decision_to_content_fails_with_no_content_id(self):
with self.assertRaises(TypeError):
self.sift_client.apply_content_decision("user_id", None, | |
Return the poset of equivalence classes of the subsets of ``self``.
Each element of the poset is a set of :class:`ManifoldSubset` instances,
which are known to be equal.
INPUT:
- ``open_covers`` -- (default: ``False``) whether to include vertices for open covers
- ``points`` -- (default: ``False``) whether to include vertices for declared points;
this can also be an iterable for the points to include
- ``lower_bound`` -- (default: ``None``) only include supersets of this
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: U = M.open_subset('U'); V = M.open_subset('V'); W = M.open_subset('W')
sage: P = M.subset_poset(); P
Finite poset containing 4 elements
sage: P.plot(element_labels={element: element._name for element in P})
Graphics object consisting of 8 graphics primitives
sage: VW = V.union(W)
sage: P = M.subset_poset(); P
Finite poset containing 5 elements
sage: P.maximal_elements()
[Set {M} of open subsets of the 3-dimensional differentiable manifold M]
sage: sorted(P.minimal_elements(), key=lambda v: v._name)
[Set {U} of open subsets of the 3-dimensional differentiable manifold M,
Set {V} of open subsets of the 3-dimensional differentiable manifold M,
Set {W} of open subsets of the 3-dimensional differentiable manifold M]
sage: from sage.manifolds.subset import ManifoldSubsetFiniteFamily
sage: sorted(P.lower_covers(ManifoldSubsetFiniteFamily([M])), key=str)
[Set {U} of open subsets of the 3-dimensional differentiable manifold M,
Set {V_union_W} of open subsets of the 3-dimensional differentiable manifold M]
sage: P.plot(element_labels={element: element._name for element in P})
Graphics object consisting of 10 graphics primitives
If ``open_covers`` is ``True``, the poset includes a special vertex for
each nontrivial open cover of a subset::
sage: P = M.subset_poset(open_covers=True); P
Finite poset containing 6 elements
sage: from sage.manifolds.subset import ManifoldSubsetFiniteFamily
sage: sorted(P.upper_covers(ManifoldSubsetFiniteFamily([VW])), key=str)
[(Set {V} of open subsets of the 3-dimensional differentiable manifold M,
Set {W} of open subsets of the 3-dimensional differentiable manifold M),
Set {M} of open subsets of the 3-dimensional differentiable manifold M]
sage: def label(element):
....: try:
....: return element._name
....: except AttributeError:
....: return '[' + ', '.join(sorted(x._name for x in element)) + ']'
sage: P.plot(element_labels={element: label(element) for element in P})
Graphics object consisting of 12 graphics primitives
.. PLOT::
def label(element):
try:
return element._name
except AttributeError:
return '[' + ', '.join(sorted(x._name for x in element)) + ']'
M = Manifold(3, 'M')
U = M.open_subset('U'); V = M.open_subset('V'); W = M.open_subset('W')
P = M.subset_poset()
g1 = P.plot(element_labels={element: label(element) for element in P})
VW = V.union(W)
P = M.subset_poset()
g2 = P.plot(element_labels={element: label(element) for element in P})
P = M.subset_poset(open_covers=True)
g3 = P.plot(element_labels={element: label(element) for element in P})
sphinx_plot(graphics_array([g1, g2, g3]), figsize=(8, 3))
"""
from sage.combinat.posets.posets import Poset
return Poset(self.subset_digraph(open_covers=open_covers, points=points,
quotient=True, lower_bound=lower_bound))
def equal_subsets(self):
r"""
Generate the declared manifold subsets that are equal to ``self``.
.. NOTE::
To get the equal subsets as a family, sorted by name, use the method
:meth:`equal_subset_family` instead.
EXAMPLES::
sage: M = Manifold(2, 'M', structure='topological')
sage: U = M.open_subset('U')
sage: V = U.subset('V')
sage: V.declare_equal(M)
sage: sorted(V.equal_subsets(), key=lambda v: v._name)
[2-dimensional topological manifold M,
Open subset U of the 2-dimensional topological manifold M,
Subset V of the 2-dimensional topological manifold M]
"""
for S in self.supersets():
if S in self._subsets:
yield S
    def equal_subset_family(self):
        r"""
        Return the declared manifold subsets that are equal to ``self``,
        as a family sorted by name.
        .. NOTE::
            If you only need to iterate over the equal sets in arbitrary order,
            you can use the generator method :meth:`equal_subsets` instead.
        EXAMPLES::
            sage: M = Manifold(2, 'M', structure='topological')
            sage: U = M.open_subset('U')
            sage: V = U.subset('V')
            sage: V.declare_equal(M)
            sage: V.equal_subset_family()
            Set {M, U, V} of subsets of the 2-dimensional topological manifold M
        """
        return ManifoldSubsetFiniteFamily(self.equal_subsets())
def supersets(self):
r"""
Generate the declared supersets of the current subset.
.. NOTE::
To get the supersets as a family, sorted by name, use the method
:meth:`superset_family` instead.
EXAMPLES::
sage: M = Manifold(2, 'M', structure='topological')
sage: U = M.open_subset('U')
sage: V = M.subset('V')
sage: sorted(V.supersets(), key=lambda v: v._name)
[2-dimensional topological manifold M,
Subset V of the 2-dimensional topological manifold M]
"""
yield from self._supersets
    def superset_family(self):
        r"""
        Return the family of declared supersets of the current subset.
        The family is sorted by the alphabetical names of the supersets.
        OUTPUT:
        - a :class:`ManifoldSubsetFiniteFamily` instance containing all the
          supersets
        .. NOTE::
            If you only need to iterate over the supersets in arbitrary order,
            you can use the generator method :meth:`supersets` instead.
        EXAMPLES::
            sage: M = Manifold(2, 'M', structure='topological')
            sage: U = M.open_subset('U')
            sage: V = M.subset('V')
            sage: V.superset_family()
            Set {M, V} of subsets of the 2-dimensional topological manifold M
        """
        # Name-sorting is performed by ManifoldSubsetFiniteFamily itself.
        return ManifoldSubsetFiniteFamily(self.supersets())
def superset_digraph(self, loops=False, quotient=False, open_covers=False, points=False, upper_bound=None):
r"""
Return the digraph whose arcs represent subset relations among the supersets of ``self``.
INPUT:
- ``loops`` -- (default: ``False``) whether to include the trivial containment
of each subset in itself as loops of the digraph
- ``quotient`` -- (default: ``False``) whether to contract directed cycles in the graph,
replacing equivalence classes of equal subsets by a single vertex.
In this case, each vertex of the digraph is a set of :class:`ManifoldSubset`
instances.
- ``open_covers`` -- (default: ``False``) whether to include vertices for open covers
- ``points`` -- (default: ``False``) whether to include vertices for declared points;
this can also be an iterable for the points to include
- ``upper_bound`` -- (default: ``None``) only include subsets of this
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: U = M.open_subset('U'); V = M.open_subset('V'); W = M.open_subset('W')
sage: VW = V.union(W)
sage: P = V.superset_digraph(loops=False, upper_bound=VW); P
Digraph on 2 vertices
"""
if upper_bound is None:
upper_bound = self._manifold
return upper_bound.subset_digraph(loops=loops, open_covers=open_covers, points=points,
quotient=quotient, lower_bound=self)
def superset_poset(self, open_covers=False, points=False, upper_bound=None):
r"""
Return the poset of the supersets of ``self``.
INPUT:
- ``open_covers`` -- (default: ``False``) whether to include vertices for open covers
- ``points`` -- (default: ``False``) whether to include vertices for declared points;
this can also be an iterable for the points to include
- ``upper_bound`` -- (default: ``None``) only include subsets of this
EXAMPLES::
sage: M = Manifold(3, 'M')
sage: U = M.open_subset('U'); V = M.open_subset('V'); W = M.open_subset('W')
sage: VW = V.union(W)
sage: P = V.superset_poset(); P
Finite poset containing 3 elements
sage: P.plot(element_labels={element: element._name for element in P})
Graphics object consisting of 6 graphics primitives
"""
if upper_bound is None:
upper_bound = self._manifold
return upper_bound.subset_poset(open_covers=open_covers, points=points,
lower_bound=self)
def get_subset(self, name):
r"""
Get a subset by its name.
The subset must have been previously created by the method
:meth:`subset` (or
:meth:`~sage.manifolds.manifold.TopologicalManifold.open_subset`)
INPUT:
- ``name`` -- (string) name of the subset
OUTPUT:
- instance of :class:`~sage.manifolds.subset.ManifoldSubset` (or
of the derived class
:class:`~sage.manifolds.manifold.TopologicalManifold` for an open
subset) representing the subset whose name is ``name``
EXAMPLES::
sage: M = Manifold(4, 'M', structure='topological')
sage: A = M.subset('A')
sage: B = A.subset('B')
sage: U = M.open_subset('U')
sage: M.subset_family()
Set {A, B, M, U} of subsets of the 4-dimensional topological manifold M
sage: M.get_subset('A')
Subset A of the 4-dimensional topological manifold M
sage: M.get_subset('A') is A
True
sage: M.get_subset('B') is B
True
sage: A.get_subset('B') is B
True
sage: M.get_subset('U')
Open subset U of the 4-dimensional topological manifold M
sage: M.get_subset('U') is U
True
"""
for ss in self._subsets:
if ss._name == name:
return ss
raise ValueError("no subset of name '{}' found".format(name))
#### End of accessors
    def is_subset(self, other):
        r"""
        Return ``True`` if and only if ``self`` is included in ``other``.
        EXAMPLES:
        Subsets on a 2-dimensional manifold::
            sage: M = Manifold(2, 'M', structure='topological')
            sage: a = M.subset('A')
            sage: b = a.subset('B')
            sage: c = M.subset('C')
            sage: a.is_subset(M)
            True
            sage: b.is_subset(a)
            True
            sage: b.is_subset(M)
            True
            sage: a.is_subset(M)
            True
            sage: a.is_subset(b)
            False
            sage: c.is_subset(a)
            False
        """
        # Single membership test against other's declared-subsets registry;
        # presumably the declare_*/subset methods keep it transitively closed
        # (b.is_subset(M) above relies on that) — confirm in the class docs.
        return self in other._subsets
def declare_union(self, *subsets_or_families, disjoint=False):
r"""
Declare that the current subset is the union of two subsets.
Suppose `U` is the current subset, then this method declares
that `U = \bigcup_{S\in F} S`.
INPUT:
- ``subsets_or_families`` -- finitely many subsets or iterables of subsets
- ``disjoint`` -- (default: ``False``) whether to declare the subsets
pairwise disjoint
EXAMPLES::
sage: M = Manifold(2, 'M', structure='topological')
sage: AB = M.subset('AB')
sage: | |
self.connection and self.connection.is_open:
log.debug('Stopping watcher task')
self._watch_stopping.set()
await self._watch_stopped.wait()
log.debug('Closing model connection')
await self.connection.close()
self.connection = None
async def add_local_charm_dir(self, charm_dir, series):
"""Upload a local charm to the model.
This will automatically generate an archive from
the charm dir.
:param charm_dir: Path to the charm directory
:param series: Charm series
"""
fh = tempfile.NamedTemporaryFile()
CharmArchiveGenerator(charm_dir).make_archive(fh.name)
with fh:
func = partial(
self.add_local_charm, fh, series, os.stat(fh.name).st_size)
charm_url = await self.loop.run_in_executor(None, func)
log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
return charm_url
def add_local_charm(self, charm_file, series, size=None):
"""Upload a local charm archive to the model.
Returns the 'local:...' url that should be used to deploy the charm.
:param charm_file: Path to charm zip archive
:param series: Charm series
:param size: Size of the archive, in bytes
:return str: 'local:...' url for deploying the charm
:raises: :class:`JujuError` if the upload fails
Uses an https endpoint at the same host:port as the wss.
Supports large file uploads.
.. warning::
This method will block. Consider using :meth:`add_local_charm_dir`
instead.
"""
conn, headers, path_prefix = self.connection.https_connection()
path = "%s/charms?series=%s" % (path_prefix, series)
headers['Content-Type'] = 'application/zip'
if size:
headers['Content-Length'] = size
conn.request("POST", path, charm_file, headers)
response = conn.getresponse()
result = response.read().decode()
if not response.status == 200:
raise JujuError(result)
result = json.loads(result)
return result['charm-url']
def all_units_idle(self):
"""Return True if all units are idle.
"""
for unit in self.units.values():
unit_status = unit.data['agent-status']['current']
if unit_status != 'idle':
return False
return True
    async def reset(self, force=False):
        """Reset the model to a clean state.
        :param bool force: Force-terminate machines.
        This returns only after the model has reached a clean state. "Clean"
        means no applications or machines exist in the model.
        """
        log.debug('Resetting model')
        # Destroy applications before machines — presumably so units are
        # removed before their hosts; confirm the ordering requirement.
        for app in self.applications.values():
            await app.destroy()
        for machine in self.machines.values():
            await machine.destroy(force=force)
        # Destroy calls return before removal completes; wait until the
        # watcher-maintained machine map is empty.
        await self.block_until(
            lambda: len(self.machines) == 0
        )
    async def block_until(self, *conditions, timeout=None, wait_period=0.5):
        """Return only after all conditions are true.

        :param conditions: zero-argument callables, polled every
            ``wait_period`` seconds until all return a truthy value
        :param timeout: seconds after which ``asyncio.TimeoutError`` is
            raised by ``wait_for`` (``None`` waits forever)
        :param wait_period: polling interval in seconds
        """
        async def _block():
            while not all(c() for c in conditions):
                # NOTE(review): the explicit ``loop=`` kwarg was deprecated in
                # Python 3.8 and removed in 3.10 — fine for the interpreters
                # this code targets, but verify before upgrading.
                await asyncio.sleep(wait_period, loop=self.loop)
        await asyncio.wait_for(_block(), timeout, loop=self.loop)
    @property
    def applications(self):
        """Return a map of application-name:Application for all applications
        currently in the model.
        """
        # Backed by the state snapshot the watcher task keeps current.
        return self.state.applications
    @property
    def machines(self):
        """Return a map of machine-id:Machine for all machines currently in
        the model.
        """
        # Backed by the state snapshot the watcher task keeps current.
        return self.state.machines
    @property
    def units(self):
        """Return a map of unit-id:Unit for all units currently in
        the model.
        """
        # Backed by the state snapshot the watcher task keeps current.
        return self.state.units
    async def get_info(self):
        """Return a client.ModelInfo object for this Model.
        Retrieves latest info for this Model from the api server. The
        return value is cached on the Model.info attribute so that the
        valued may be accessed again without another api call, if
        desired.
        This method is called automatically when the Model is connected,
        resulting in Model.info being initialized without requiring an
        explicit call to this method.
        """
        facade = client.ClientFacade.from_connection(self.connection)
        # Cache on the instance so Model.info can be read without a new call.
        self.info = await facade.ModelInfo()
        log.debug('Got ModelInfo: %s', vars(self.info))
        return self.info
    def add_observer(
            self, callable_, entity_type=None, action=None, entity_id=None,
            predicate=None):
        """Register an "on-model-change" callback
        Once the model is connected, ``callable_``
        will be called each time the model changes. ``callable_`` should
        be Awaitable and accept the following positional arguments:
        delta - An instance of :class:`juju.delta.EntityDelta`
            containing the raw delta data recv'd from the Juju
            websocket.
        old_obj - If the delta modifies an existing object in the model,
            old_obj will be a copy of that object, as it was before the
            delta was applied. Will be None if the delta creates a new
            entity in the model.
        new_obj - A copy of the new or updated object, after the delta
            is applied. Will be None if the delta removes an entity
            from the model.
        model - The :class:`Model` itself.
        Events for which ``callable_`` is called can be specified by passing
        entity_type, action, and/or entity_id filter criteria, e.g.::
            add_observer(
                myfunc,
                entity_type='application', action='add', entity_id='ubuntu')
        For more complex filtering conditions, pass a predicate function. It
        will be called with a delta as its only argument. If the predicate
        function returns True, the ``callable_`` will be called.
        """
        # Filtering criteria live on the _Observer wrapper; the dict maps
        # wrapper -> raw callable for dispatch by _notify_observers.
        observer = _Observer(
            callable_, entity_type, action, entity_id, predicate)
        self.observers[observer] = callable_
def _watch(self):
    """Start an asynchronous watch against this model.
    See :meth:`add_observer` to register an onchange callback.
    """
    async def _start_watch():
        try:
            allwatcher = client.AllWatcherFacade.from_connection(
                self.connection)
            while not self._watch_stopping.is_set():
                # run_with_interrupt lets a stop request break out of a
                # long-poll Next() call that would otherwise block.
                results = await utils.run_with_interrupt(
                    allwatcher.Next(),
                    self._watch_stopping,
                    self.loop)
                if self._watch_stopping.is_set():
                    break
                for delta in results.deltas:
                    delta = get_entity_delta(delta)
                    old_obj, new_obj = self.state.apply_delta(delta)
                    await self._notify_observers(delta, old_obj, new_obj)
                # signal that at least one full batch of deltas landed
                self._watch_received.set()
        except CancelledError:
            pass
        except Exception:
            log.exception('Error in watcher')
            raise
        finally:
            # always mark the watcher as stopped so shutdown can proceed
            self._watch_stopped.set()
    log.debug('Starting watcher task')
    # reset the lifecycle flags before (re)starting the watcher task
    self._watch_received.clear()
    self._watch_stopping.clear()
    self._watch_stopped.clear()
    self.loop.create_task(_start_watch())
async def _notify_observers(self, delta, old_obj, new_obj):
    """Call observing callbacks, notifying them of a change in model state
    :param delta: The raw change from the watcher
        (:class:`juju.client.overrides.Delta`)
    :param old_obj: The object in the model that this delta updates.
        May be None.
    :param new_obj: The object in the model that is created or updated
        by applying this delta.
    """
    # a delta that creates an object is reported as an 'add', regardless
    # of how the watcher originally labelled it
    if new_obj and not old_obj:
        delta.type = 'add'
    log.debug(
        'Model changed: %s %s %s',
        delta.entity, delta.type, delta.get_id())
    for o in self.observers:
        if o.cares_about(delta):
            # NOTE: the explicit loop= argument was dropped; it was
            # deprecated in Python 3.8 and removed in 3.10, and
            # ensure_future() schedules on the running loop anyway.
            # Observers run concurrently; results are not awaited.
            asyncio.ensure_future(o(delta, old_obj, new_obj, self))
async def _wait(self, entity_type, entity_id, action, predicate=None):
    """
    Block the calling routine until a given action has happened to the
    given entity
    :param entity_type: The entity's type.
    :param entity_id: The entity's id.
    :param action: the type of action (e.g., 'add', 'change', or 'remove')
    :param predicate: optional callable that must take as an
        argument a delta, and must return a boolean, indicating
        whether the delta contains the specific action we're looking
        for. For example, you might check to see whether a 'change'
        has a 'completed' status. See the _Observer class for details.
    """
    # NOTE: asyncio.Queue's loop argument was deprecated in Python 3.8
    # and removed in 3.10; the queue binds to the running loop itself.
    q = asyncio.Queue()

    async def callback(delta, old, new, model):
        await q.put(delta.get_id())

    self.add_observer(callback, entity_type, action, entity_id, predicate)
    entity_id = await q.get()
    # object might not be in the entity_map if we were waiting for a
    # 'remove' action
    return self.state._live_entity_map(entity_type).get(entity_id)
async def _wait_for_new(self, entity_type, entity_id=None, predicate=None):
    """Wait for a new object to appear in the Model and return it.

    Waits for an object of type ``entity_type`` with id ``entity_id``;
    when ``entity_id`` is None, the first new entity of the requested
    type is returned instead. Blocks until the object is in the model.
    """
    # short-circuit: the entity may already be present
    live = self.state._live_entity_map(entity_type)
    if entity_id in live:
        return live[entity_id]
    # With a known entity_id any action that materializes the entity will
    # do; without one we must watch for the next 'add' of this type.
    if entity_id is None:
        action = 'add'
    else:
        action = None
    return await self._wait(entity_type, entity_id, action, predicate)
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete."""
    tag_prefix = "action-"
    if action_id.startswith(tag_prefix):
        # callers may pass action.tag; the api deltas use the bare id
        action_id = action_id[len(tag_prefix):]

    def finished(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, 'change', finished)
async def add_machine(
self, spec=None, constraints=None, disks=None, series=None):
"""Start a new, empty machine and optionally a container, or add a
container to a machine.
:param str spec: Machine specification
Examples::
(None) - starts a new machine
'lxd' - starts a new machine with one lxd container
'lxd:4' - starts a new lxd container on machine 4
'ssh:user@10.10.0.3' - manually provisions a machine with ssh
'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS
'maas2.name' - acquire machine maas2.name on MAAS
:param dict constraints: Machine constraints
Example::
constraints={
'mem': 256 * MB,
}
:param list disks: List of disk constraint dictionaries
Example::
disks=[{
'pool': 'rootfs',
'size': 10 * GB,
'count': 1,
}]
:param str series: Series, e.g. 'xenial'
Supported container types are: lxd, kvm
When deploying a container to an existing machine, constraints cannot
be used.
"""
params = client.AddMachineParams()
params.jobs = ['JobHostUnits']
if spec:
placement = parse_placement(spec)
if placement:
params.placement = placement[0]
if constraints:
params.constraints = client.Value.from_json(constraints)
if disks:
params.disks = [
client.Constraints.from_json(o) for o in disks]
if series:
params.series = series
# Submit the | |
<filename>tf_agents/synthetic_experiment.py
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"]="true"
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tqdm.notebook import tqdm
tf.compat.v1.enable_v2_behavior()
from tf_agents.curiosity import m_passthrough_action
import pixiedust
from functools import partial
import gin
import itertools
import multiprocessing
import pickle
from copy import deepcopy
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule as pruning_sched
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_wrapper
from tensorflow_model_optimization.python.core.sparsity.keras import prune
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_callbacks
dim = 2
def set_dimension(d):
    """Set the module-wide problem dimensionality ``dim`` used by the models."""
    global dim
    dim = d
def matrix_dist(A, B):
    """Return the entrywise L1 distance between matrices A and B."""
    return np.linalg.norm(np.ravel(A - B), ord=1)
def projection_simplex_sort(v, z=1):
    """Project v onto the constraint set sum(v_i) <= z, v_i >= 0 after shift.

    Returns v unchanged when its L1 norm already satisfies the budget;
    otherwise applies the classic O(n log n) sort-and-threshold projection
    (https://gist.github.com/EdwardRaff/f4f4cf0c927c2addfb39).
    """
    n = v.shape[0]
    if np.linalg.norm(v, ord=1) <= z:
        return v
    sorted_desc = np.sort(v)[::-1]
    shifted_cumsum = np.cumsum(sorted_desc) - z
    ranks = np.arange(n) + 1
    positive = sorted_desc - shifted_cumsum / ranks > 0
    rho = ranks[positive][-1]
    theta = shifted_cumsum[positive][-1] / float(rho)
    return np.maximum(v - theta, 0)
def project_l1(v, z=1):
    """Project onto |x|_1<=z, preserving the sign of each component."""
    v = np.array(v)
    signs = np.sign(v)
    # project the magnitudes onto the simplex, then restore the signs
    magnitudes = v * signs
    projected_magnitudes = projection_simplex_sort(magnitudes, z)
    u = projected_magnitudes * signs
    norm = np.linalg.norm(u, ord=1)
    # loose sanity check (10% slack for floating point)
    assert norm <= z * 1.1, "Got norm %.2f %s" % (norm, str(u))
    return u
@gin.configurable
def projection_step(w, l1_ball_size=None):
    """Project the weight tensor w onto an l1 ball in place.

    :param w: a tf.Variable; mutated via assign() when projecting.
    :param l1_ball_size: radius of the l1 ball; when None no projection
        happens and the current l1 norm of w is returned instead.
    :returns: l1 norm of the (possibly projected) weights.
    """
    flat = np.reshape(w.numpy(), (-1,))
    if l1_ball_size is None:
        # report-only mode: no mutation
        return np.linalg.norm(flat, ord=1)
    projected = project_l1(flat, l1_ball_size)
    norm = np.linalg.norm(projected, ord=1)
    w.assign(np.reshape(projected, w.numpy().shape))
    return norm
@gin.configurable
def mask_step(w, mask=None, eps=1e-3):
    """Apply a binary mask to the weight tensor in place; return the nnz count.

    (The previous docstring, "Project weights onto an l1 ball", was a
    copy-paste from projection_step.)

    :param w: a tf.Variable holding the weights; mutated via assign().
    :param mask: elementwise multiplier with the same shape as w, or None.
    :param eps: magnitude threshold below which an entry counts as zero.
    :returns: number of entries with |w| > eps — counted before masking
        when mask is None, after masking otherwise.
    """
    #assert len(model.trainable_variables) == 1,\
    #    "Only support 1 weight tensor (now)."
    weights = w # model.trainable_variables[0]
    nnz = np.sum(np.abs(weights.numpy()) > eps)
    if mask is None: return nnz
    w_numpy = weights.numpy()
    assert w_numpy.shape == mask.shape, "Bad mask shape %s %s" % \
        (str(w_numpy.shape), str(mask.shape))
    weights.assign(np.multiply(mask, w_numpy))
    # recount after the mask has been applied
    nnz = np.sum(np.abs(weights.numpy()) > eps)
    return nnz
@gin.configurable
def compute_mask(w, components_to_keep=5):
    """Select biggest components in w.

    :param w: weight array; NOTE(review): the tie-breaking below uses a
        two-output np.where, which assumes w is 2-D — confirm for all callers.
    :param components_to_keep: number of entries to keep (must be a
        positive int; clamped to the total component count).
    :returns: a 0/1 mask of the same shape as w with exactly
        components_to_keep ones.
    """
    # assuming no duplicates...
    assert isinstance(components_to_keep, int), "Want int, got %s" % \
        str(components_to_keep)
    assert components_to_keep > 0, "No sense doing 0 components"
    # weights of the model
    w_flat = w.flatten()
    # number of components in weights
    n_components = w_flat.shape[0]
    #print(n_components, components_to_keep)
    # clamping components_to_keep
    if components_to_keep > n_components:
        components_to_keep = n_components
    # selecting the threshold so that we have
    # the correct number of components
    threshold = np.sort(np.abs(w_flat))[::-1][components_to_keep - 1]
    # print(w_flat, threshold)
    # abs of w
    w_abs = np.abs(w)
    # computing the mask (strict > excludes ties at the threshold)
    mask = 1. * (w_abs > threshold)
    # if several entries equal the threshold, the strict comparison kept
    # too few; promote just enough of the tied entries to reach the quota
    left = components_to_keep - np.sum(mask > 0)
    a, b = np.where(w_abs == threshold)
    for i in range(left):
        mask[a[i], b[i]] = 1.0
    assert np.sum(mask > 0) == components_to_keep,\
        "Wrong mask %s %d" % (str(mask), components_to_keep)
    return mask
class MaxNorm1(tf.keras.constraints.Constraint):
    """Keras constraint clipping the norm of w to max_value along an axis.

    NOTE: the original __call__ referenced ``K`` (the keras backend) and
    ``math_ops``, neither of which is imported in this module, so calling
    the constraint raised NameError. The body now uses the equivalent
    public TensorFlow ops (same math as tf.keras.constraints.MaxNorm).
    """

    def __init__(self, max_value=2, axis=0):
        # maximum allowed norm and the axis along which norms are taken
        self.max_value = max_value
        self.axis = axis

    def __call__(self, w):
        # per-slice l2 norms along self.axis
        norms = tf.sqrt(
            tf.reduce_sum(tf.square(w), axis=self.axis, keepdims=True))
        # clip norms into [0, max_value] and rescale w accordingly;
        # epsilon guards against division by zero
        desired = tf.clip_by_value(norms, 0, self.max_value)
        return w * (desired / (tf.keras.backend.epsilon() + norms))

    def get_config(self):
        # serialization support for keras model saving
        return {'max_value': self.max_value, 'axis': self.axis}
# smoke-check of the projection helper; the result is intentionally discarded
projection_simplex_sort(np.array([1,0.1]), 3)
#%%pixie_debugger
# l1 parameters
l1coeff = 0
# for reconstructor
#l2coeff = 0
# for keras sparsity
sparsity = 0.3
# configuration consumed by tensorflow_model_optimization pruning wrappers
pruning_params = {
    'pruning_schedule': pruning_sched.ConstantSparsity(0.9, 0),
    #'pruning_schedule': pruning_sched.PolynomialDecay(0, 0.3, 0, 100),
    'block_size': (1, 1),
    'block_pooling_type': 'AVG'
}
def component_diff_normalized(v):
    """How much the vector is close to a coordinate axis.

    Returns 0 when one component dominates completely (e.g. (1,0)) and 1
    when the two largest magnitudes are equal. Requires len(v) >= 2.
    """
    magnitudes = np.sort(np.abs(v))[::-1]
    largest, second = magnitudes[0], magnitudes[1]
    return 1. - (largest - second) / largest
def vec_angle_normalized(v1, v2):
    """Absolute cosine between vectors: 1 for (anti)parallel, 0 for orthogonal."""
    cosine = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.abs(cosine)
def repr_quality(A):
    """Loss for representation quality for matrix A (lower is better).

    Diagonal terms reward basis vectors (columns of A) that hug a
    coordinate axis; off-diagonal terms penalize pairs of basis vectors
    that are far from orthogonal.
    """
    basis = A.T  # basis vectors = columns
    total = 0
    for i, u in enumerate(basis):
        for j, v in enumerate(basis):
            if i > j:
                continue  # count each unordered pair once
            if i == j:
                total += component_diff_normalized(u)
            else:
                total += vec_angle_normalized(u, v)
    return total
@gin.configurable
def build_decoder_model(input_layer, init_fp_dist=None):
    """Create a decoder model.

    :param input_layer: keras Input layer the decoder Dense layer sits on.
    :param init_fp_dist: when not None, initialize the kernel at
        inv(Q1).T plus a constant offset of this magnitude.
        NOTE(review): depends on module-level globals ``Q1`` and ``dim``
        being defined before this is called — confirm.
    :returns: a tf.keras.Sequential with a single bias-free Dense layer
        whose kernel norm is constrained to [0.5, 1.5].
    """
    decoder = tf.keras.Sequential([ #D
        input_layer,
        tf.keras.layers.Dense(dim, activation=None, use_bias=False, #kernel_regularizer=tf.keras.regularizers.l2(l2coeff),
                              #kernel_initializer='random_normal',
                              #kernel_constraint=tf.keras.constraints.UnitNorm()
                              kernel_constraint=tf.keras.constraints.MinMaxNorm(0.5, 1.5) # == 1 --
                              ),
    ])
    if init_fp_dist is not None:
        decoder.layers[-1].set_weights([np.linalg.inv(Q1).T +\
                                        np.ones((dim, dim)) * init_fp_dist])
    return decoder
@gin.configurable
def build_reconstructor_model(init_fp_dist=None):
    """Build the reconstructor.

    :param init_fp_dist: when not None, initialize the kernel at Q1.T plus
        a constant offset of this magnitude.
        NOTE(review): depends on module-level globals ``Q1`` and ``dim``
        being defined before this is called — confirm.
    :returns: a tf.keras.Sequential mapping dim -> dim with a single
        bias-free Dense layer, kernel norm constrained to [0.4, 2].
    """
    # encoder model -- imitates the RL agent which has converged to something -- and needs to reconstruct the state
    # but the policy is "fixed" and the reward = max
    reconstructor = tf.keras.Sequential([ # E
        tf.keras.Input(shape=(dim,)),
        tf.keras.layers.Dense(dim, activation=None, use_bias=False, #kernel_regularizer=tf.keras.regularizers.l2(l2coeff),
                              #kernel_initializer='random_normal',
                              #kernel_constraint=tf.keras.constraints.UnitNorm()
                              # how can we take the scale out of this -- decompose
                              kernel_constraint=tf.keras.constraints.MinMaxNorm(0.4, 2)
                              ),
    ])
    if init_fp_dist is not None:
        reconstructor.layers[-1].set_weights([Q1.T + np.ones((dim, dim)) * init_fp_dist])
    #reconstructor.layers[-1].set_weights([np.linalg.inv(decoder.get_weights()[0])])
    return reconstructor
@gin.configurable
def build_feature_model(decoder, init_fp_dist=None, l1coeff=0.0):
    """Build the feature transition dynamics model.

    :param decoder: decoder model wrapped by m_passthrough_action so the
        action part of the input bypasses decoding.
    :param init_fp_dist: when not None, initialize the kernel at A.T plus
        a constant offset of this magnitude.
        NOTE(review): depends on module-level globals ``A`` and ``dim``
        being defined before this is called — confirm.
    :param l1coeff: l1 regularization weight on the transition kernel.
    :returns: a tf.keras.Sequential mapping 2*dim inputs to dim outputs.
    """
    # maps observations to features
    model = tf.keras.Sequential([ # M
        m_passthrough_action(decoder, dim, dim), # D
        tf.keras.Input(shape=(2 * dim,)),
        #prune.prune_low_magnitude(
        tf.keras.layers.Dense(dim, activation=None, use_bias=False, kernel_regularizer=tf.keras.regularizers.l1(l1coeff),
                              #kernel_initializer='random_normal'
                              ), # M_D
        #**pruning_params)
    ])
    if init_fp_dist is not None:
        model.layers[-1].set_weights([A.T + np.ones((2 * dim, dim)) * init_fp_dist])
    return model
def loss_model_fit(y_true, y_pred, decoder=None, sample_weight=None):
    """Mean absolute error between model predictions and decoded targets."""
    del sample_weight  # unused, kept for the keras loss signature
    # y_pred comes straight from the model; targets are decoded first
    return tf.reduce_mean(tf.abs(y_pred - decoder(y_true)))
def loss_model_fit_rmd(y_true, y_pred, reconstructor=None, sample_weight=None):
    """Mean absolute error between reconstructed predictions and raw targets."""
    del sample_weight  # unused, kept for the keras loss signature
    # y_pred comes from the model; it is mapped back to observation space
    return tf.reduce_mean(tf.abs(reconstructor(y_pred) - y_true))
def loss_reconstructor(reconstructor, decoder, x):
    """Round-trip error of reconstructor(decoder(.)) on the observations in x."""
    if x is None:
        return 0
    # keep only the observation part of the input (drop the action columns)
    obs = x[:, :dim]
    return tf.reduce_mean(tf.abs(reconstructor(decoder(obs)) - obs))
def list_of_lists_to_list(lst_of_lst):
    """Flatten one level of nesting into a single list."""
    return list(itertools.chain.from_iterable(lst_of_lst))
@tf.function
def flatten_array_of_tensors(W):
    """Take an array of tensors and turn it into a single flat tensor."""
    # reshape each tensor to 1-D, then concatenate along axis 0
    return tf.concat([tf.reshape(w, (-1,)) for w in W], axis=0)
def apply_optimizer(loss, models, optimizer, tape):
    """Do a step on the loss.

    :param loss: scalar tensor recorded on ``tape``.
    :param models: list of keras models whose trainable variables to update.
    :param optimizer: tf optimizer applying the gradients.
    :param tape: the GradientTape that recorded the loss computation.
    """
    # all their variables (list-of-lists; tape.gradient mirrors the nesting)
    all_variables = [model.trainable_variables for model in models]
    grads = tape.gradient(loss, all_variables)
    # flatten both nestings in lockstep so grads pair with their variables
    optimizer.apply_gradients(zip(list_of_lists_to_list(grads),
                                  list_of_lists_to_list(all_variables)))
@gin.configurable
def step_rmd(model, decoder, reconstructor, xs, ys,
             optimizer,
             l_rec_coeff=1.0,
             l1_coeff=0.0):
    """One optimization step.

    Optimizes fit + reconstruction + l1 losses jointly with a single
    optimizer, then applies mask/projection constraints in place to the
    transition kernel (model.weights[1]).

    :returns: dict of diagnostics; l_l1/nnz reflect the post-projection
        weights, l_fit/l_rec the pre-update losses.
    """
    # xs - observations + actions
    # ys - next observations
    # converting dtype
    xs = np.array(xs, dtype=np.float32)
    ys = np.array(ys, dtype=np.float32)
    with tf.GradientTape() as tape:
        # Make prediction
        y_pred = model(xs)
        # Calculate loss
        l_fit = loss_model_fit_rmd(ys, y_pred, reconstructor=reconstructor)
        l_rec = loss_reconstructor(reconstructor=reconstructor,
                                   decoder=decoder, x=xs)
        # weight 0 is decoder
        l_l1 = tf.norm(flatten_array_of_tensors([model.weights[1]]),
                       ord=1)
        # total loss
        total_loss = l_fit + l_rec_coeff * l_rec + \
            l1_coeff * l_l1
    # list of models
    models = [model, reconstructor] # decoder weights are in the model
    apply_optimizer(loss=total_loss, optimizer=optimizer,
                    tape=tape, models=models)
    # constraint steps run after the gradient update; they mutate the
    # weights and return post-projection diagnostics
    nnz = mask_step(model.weights[1])
    l1 = projection_step(model.weights[1])
    return {'l_fit': l_fit.numpy(), 'l_rec': l_rec.numpy(),
            'l_l1': l1, 'nnz': nnz}
@gin.configurable
def step_rmd_2opt(model, decoder, reconstructor, xs, ys,
                  optimizer_rmd, optimizer_rd,
                  l_rec_coeff=1.0,
                  l1_coeff=0.0):
    """One optimization step with separate optimizers.

    optimizer_rmd updates model+reconstructor on the fit loss and
    optimizer_rd updates reconstructor+decoder on the reconstruction loss.
    NOTE(review): l_rec_coeff and l1_coeff are unused here — presumably
    kept for gin-config compatibility with step_rmd; confirm.
    """
    # xs - observations + actions
    # ys - next observations
    # converting dtype
    xs = np.array(xs, dtype=np.float32)
    ys = np.array(ys, dtype=np.float32)
    # left out deliberately, don't want to update on it.
    l_l1 = tf.norm(flatten_array_of_tensors([model.weights[1]]),
                   ord=1)
    with tf.GradientTape() as tape_rmd, tf.GradientTape() as tape_rd:
        # Make prediction
        y_pred = model(xs)
        # Calculate loss
        l_fit = loss_model_fit_rmd(ys, y_pred, reconstructor=reconstructor)
        l_rec = loss_reconstructor(reconstructor=reconstructor,
                                   decoder=decoder, x=xs)
        # weight 0 is decoder
        rmd_loss = l_fit
        rd_loss = l_rec
    # list of models
    models_rmd = [model, reconstructor] # decoder weights are in the model
    models_rd = [reconstructor, decoder]
    # mask/projection mutate the transition kernel before the gradient
    # updates below are applied
    nnz = mask_step(model.weights[1])
    l1 = projection_step(model.weights[1])
    # returning the old loss to output post-projection values
    results = {'l_fit': l_fit.numpy(), 'l_rec': l_rec.numpy(),
               'l_l1': l1, 'nnz': nnz}
    apply_optimizer(loss=rmd_loss, optimizer=optimizer_rmd,
                    tape=tape_rmd, models=models_rmd)
    apply_optimizer(loss=rd_loss, optimizer=optimizer_rd,
                    tape=tape_rd, models=models_rd)
    return results
@gin.configurable
def step_2opt(model, decoder, reconstructor, xs, ys,
optimizer_md, optimizer_rd,
l_rec_coeff=1.0,
l1_coeff=0.0,
rec_iters=1):
"""One optimization step."""
# xs - observations + actions
# ys - next observations
# converting dtype
xs = np.array(xs, dtype=np.float32)
ys = np.array(ys, dtype=np.float32)
# list of models
models_md = [model] # decoder weights are in the model
models_rd = [reconstructor, decoder]
for _ in range(rec_iters):
with tf.GradientTape() as tape_rd:
l_rec = loss_reconstructor(reconstructor=reconstructor,
decoder=decoder, x=xs)
apply_optimizer(loss=l_rec, optimizer=optimizer_rd,
tape=tape_rd, models=models_rd)
# left out deliberately, don't want to update on it.
l_l1 = tf.norm(flatten_array_of_tensors([model.weights[1]]),
ord=1)
with tf.GradientTape() as tape_md, tf.GradientTape() as tape_g1,\
tf.GradientTape() as tape_g2:
# Make prediction
y_pred = model(xs)
# Calculate loss
l_l1 = tf.norm(flatten_array_of_tensors([model.weights[1]]),
ord=1)
l_fit = loss_model_fit(ys, y_pred, decoder=decoder)
| |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_concurrency import lockutils
from oslo_log import log
from stevedore import dispatch
from ironic.common import exception
from ironic.common.i18n import _LI, _LW
from ironic.conf import CONF
from ironic.drivers import base as driver_base
from ironic.drivers import fake_hardware
from ironic.drivers import hardware_type
LOG = log.getLogger(__name__)
EM_SEMAPHORE = 'extension_manager'
def build_driver_for_task(task, driver_name=None):
    """Compose a driver object for the node in the given task.

    A `BareDriver` instance is populated with interface implementations:
    classic drivers copy them from the monolithic driver singleton, while
    hardware types load each interface from its own factory as configured
    in the database.

    :param task: the task whose node the driver is built for.
    :param driver_name: classic driver or hardware type name to use as the
        base; defaults to task.node.driver.
    :returns: a driver object for the task.
    :raises: DriverNotFound if node.driver could not be found in either
        "ironic.drivers" or "ironic.hardware.types" namespaces.
    :raises: InterfaceNotFoundInEntrypoint if some node interfaces are set
        to invalid or unsupported values.
    :raises: IncompatibleInterface if driver is a hardware type and
        the requested implementation is not compatible with it.
    """
    node = task.node
    base_name = driver_name or node.driver
    driver_or_hw_type = get_driver_or_hardware_type(base_name)
    try:
        check_and_update_node_interfaces(
            node, driver_or_hw_type=driver_or_hw_type)
    except exception.MustBeNone as e:
        # NOTE(rloo). This was raised because nodes with classic drivers
        #             cannot have any interfaces (except for network and
        #             storage) set. However, there was a small window
        #             where this was possible so instead of breaking those
        #             users totally, we'll spam them with warnings instead.
        LOG.warning(_LW('%s They will be ignored. To avoid this warning, '
                        'please set them to None.'), e)
    composed_driver = driver_base.BareDriver()
    _attach_interfaces_to_driver(composed_driver, node, driver_or_hw_type)
    return composed_driver
def _attach_interfaces_to_driver(bare_driver, node, driver_or_hw_type):
    """Attach interface implementations to a bare driver object.

    For hardware types every interface is loaded dynamically from its
    factory. For classic drivers the implementations are copied off the
    monolithic driver singleton, after which only the network and storage
    interfaces are resolved dynamically.

    :param bare_driver: BareDriver instance to attach interfaces to
    :param node: Node object
    :param driver_or_hw_type: classic driver or hardware type instance
    :raises: InterfaceNotFoundInEntrypoint if the entry point was not found.
    :raises: IncompatibleInterface if driver is a hardware type and
        the requested implementation is not compatible with it.
    """
    if isinstance(driver_or_hw_type, hardware_type.AbstractHardwareType):
        # For hardware types all interfaces are dynamic
        dynamic_interfaces = _INTERFACE_LOADERS
    else:
        # Copy implementations from the classic driver singleton
        for iface in driver_or_hw_type.all_interfaces:
            setattr(bare_driver, iface,
                    getattr(driver_or_hw_type, iface, None))
        # NOTE(TheJulia): This list of interfaces to be applied
        # to classic drivers, thus requiring separate treatment.
        dynamic_interfaces = ['network', 'storage']
    for iface in dynamic_interfaces:
        impl_name = getattr(node, '%s_interface' % iface)
        setattr(bare_driver, iface,
                get_interface(driver_or_hw_type, iface, impl_name))
def get_interface(driver_or_hw_type, interface_type, interface_name):
    """Load an interface implementation instance by entry point name.

    For hardware types the implementation's compatibility is validated;
    classic drivers (and the fake hardware type, which deliberately allows
    any combination for testing) skip that check.

    :param driver_or_hw_type: a hardware type or classic driver instance.
    :param interface_type: name of the interface type (e.g. 'boot').
    :param interface_name: name of the interface implementation from an
        appropriate entry point
        (ironic.hardware.interfaces.<interface type>).
    :returns: instance of the requested interface implementation.
    :raises: InterfaceNotFoundInEntrypoint if the entry point was not found.
    :raises: IncompatibleInterface if driver_or_hw_type is a hardware type and
        the requested implementation is not compatible with it.
    """
    factory = _INTERFACE_LOADERS[interface_type]()
    try:
        impl = factory.get_driver(interface_name)
    except KeyError:
        raise exception.InterfaceNotFoundInEntrypoint(
            iface=interface_name,
            entrypoint=factory._entrypoint_name,
            valid=factory.names)
    # Classic drivers have no notion of compatibility; FakeHardware
    # accepts any implementation to ease testing.
    if (not isinstance(driver_or_hw_type, hardware_type.AbstractHardwareType)
            or isinstance(driver_or_hw_type, fake_hardware.FakeHardware)):
        return impl
    supported = getattr(driver_or_hw_type,
                        'supported_%s_interfaces' % interface_type)
    # exact type match on purpose (not isinstance): subclasses of a
    # supported implementation are not automatically supported
    if type(impl) not in supported:
        raise exception.IncompatibleInterface(
            interface_type=interface_type, interface_impl=impl,
            hardware_type=driver_or_hw_type.__class__.__name__)
    return impl
def default_interface(driver_or_hw_type, interface_type,
                      driver_name=None, node=None):
    """Calculate and return the default interface implementation.

    Finds the first implementation that is supported by the hardware type
    and is enabled in the configuration.

    :param driver_or_hw_type: classic driver or hardware type instance object.
    :param interface_type: type of the interface (e.g. 'boot').
    :param driver_name: entrypoint name of the driver_or_hw_type object. Is
        used for exception message.
    :param node: the identifier of a node. If specified, is used for exception
        message.
    :returns: an entrypoint name of the calculated default implementation.
    :raises: InterfaceNotFoundInEntrypoint if the entry point was not found.
    :raises: NoValidDefaultForInterface if no default interface can be found.
    """
    factory = _INTERFACE_LOADERS[interface_type]
    is_hardware_type = isinstance(driver_or_hw_type,
                                  hardware_type.AbstractHardwareType)
    # Explicit interface defaults
    additional_defaults = {
        'network': 'flat' if CONF.dhcp.dhcp_provider == 'neutron' else 'noop',
        'storage': 'noop'
    }
    # The fallback default from the configuration
    impl_name = getattr(CONF, 'default_%s_interface' % interface_type)
    if impl_name is None:
        impl_name = additional_defaults.get(interface_type)
    if impl_name is not None:
        # Check that the default is correct for this type; raises
        # InterfaceNotFoundInEntrypoint/IncompatibleInterface otherwise
        get_interface(driver_or_hw_type, interface_type, impl_name)
    elif is_hardware_type:
        supported = getattr(driver_or_hw_type,
                            'supported_%s_interfaces' % interface_type)
        # Mapping of classes to entry points
        enabled = {obj.__class__: name for (name, obj) in factory().items()}
        # Order of the supported list matters: stop at the first supported
        # implementation that is also enabled in the configuration
        for impl_class in supported:
            try:
                impl_name = enabled[impl_class]
                break
            except KeyError:
                continue
    if impl_name is None:
        # No usable default was found: build an informative error message
        # NOTE(rloo). No i18n on driver_type_str because translating substrings
        #             on their own may cause the final string to look odd.
        if is_hardware_type:
            driver_type_str = 'hardware type'
        else:
            driver_type_str = 'driver'
        driver_name = driver_name or driver_or_hw_type.__class__.__name__
        node_info = ""
        if node is not None:
            node_info = _(' node %s with') % node
        raise exception.NoValidDefaultForInterface(
            interface_type=interface_type, driver_type=driver_type_str,
            driver=driver_name, node_info=node_info)
    return impl_name
def check_and_update_node_interfaces(node, driver_or_hw_type=None):
"""Ensure that node interfaces (e.g. for creation or updating) are valid.
Updates (but doesn't save to the database) hardware interfaces with
calculated defaults, if they are not provided.
This function is run on node updating and creation, as well as each time
a driver instance is built for a node.
:param node: node object to check and potentially update
:param driver_or_hw_type: classic driver or hardware type instance object;
will be detected from node.driver if missing
:returns: True if any changes were made to the node, otherwise False
:raises: InterfaceNotFoundInEntrypoint on validation failure
:raises: NoValidDefaultForInterface if the default value cannot be
calculated and is not provided in the configuration
:raises: DriverNotFound if the node's driver or hardware type is not found
:raises: MustBeNone if one or more of the node's interface
fields were specified when they should not be.
"""
if driver_or_hw_type is None:
driver_or_hw_type = get_driver_or_hardware_type(node.driver)
is_hardware_type = isinstance(driver_or_hw_type,
hardware_type.AbstractHardwareType)
if is_hardware_type:
factories = _INTERFACE_LOADERS.keys()
else:
# Only network and storage interfaces are dynamic for classic drivers
factories = ['network', 'storage']
# These are interfaces that cannot be specified via the node. E.g.,
# for classic drivers, none are allowed except for network & storage.
not_allowed_ifaces = driver_base.ALL_INTERFACES - set(factories)
updates = node.obj_what_changed()
# Result - whether the node object was modified
result = False
bad_interface_fields = []
for iface in not_allowed_ifaces:
field_name = '%s_interface' % iface
# NOTE(vsaienko): reset *_interface fields that shouldn't exist for
# classic driver, only when driver was changed and field not set
# explicitly
if 'driver' in updates and field_name not in updates:
setattr(node, field_name, None)
result = True
# NOTE(dtantsur): objects raise NotImplementedError on accessing fields
# that are known, but missing from an object. Thus, we cannot just use
# getattr(node, field_name, None) here.
elif field_name in node:
impl_name = getattr(node, field_name)
if impl_name is not None:
bad_interface_fields.append(field_name)
if bad_interface_fields:
raise exception.MustBeNone(node=node.uuid, driver=node.driver,
node_fields=','.join(bad_interface_fields))
# Walk through all dynamic interfaces and check/update them
for iface in factories:
field_name = '%s_interface' % iface
# NOTE(dtantsur): objects raise NotImplementedError on accessing fields
# that are known, but | |
<reponame>pppyykknen/LFDisplay-PyTorch
import torch
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def sign(x):
    """Elementwise sign of a tensor (-1, 0, or +1)."""
    return x.sign()
class Display:
def __init__(self, observerPositions, height=600, width=800, diffusionPower=[40, 0], halfPhysSize=[200, 150],
             viewerDistance=400,
             projectorResolution=[800, 600], primaryResolution=3):
    """Set up the display geometry and per-pixel ray buffers.

    :param observerPositions: tensor of observer xyz positions; row 0 is
        used as the initial projector position.
    :param diffusionPower: [k0, k1] falloff coefficients used by Diffusion().
    :param halfPhysSize: half extents of the physical screen.
    :param viewerDistance: distance from viewer to the image plane.
    :param projectorResolution: [w, h] of the projector raster.
    :param primaryResolution: rays per pixel axis (buffers hold its square).
    NOTE(review): mutable default arguments are shared across instances —
    safe only while they are never mutated; confirm.
    """
    self.DiffusionPower = diffusionPower
    self.HalfPhysSize = halfPhysSize
    self.ViewerDistance = viewerDistance
    self.ProjectorResolution = projectorResolution
    self.observerPositions = observerPositions
    # projector starts at the first observer; detached from autograd
    self.ProjectorPosition = self.observerPositions[0, :].reshape(1, 1, 1, 3).detach()
    # per-ray origin and direction buffers (filled by GenerateRay)
    self.ori = torch.zeros(primaryResolution * primaryResolution, height, width, 3,
                           device=device)
    self.direc = torch.zeros(primaryResolution * primaryResolution, height, width, 3,
                             device=device)
    # ---
    # these values are for non-pinhole rendering, not really needed
    # Comment to save a bit of memory?
    # self.d = torch.zeros(primaryResolution * primaryResolution, height, width, 3,
    #                      device=device)
    # self.d[:, :, :, 0] = 1
    # self.outLambda = torch.zeros_like(self.direc[:, :, :, 0], device=device)
    # self.z0 = self.ViewerDistance * torch.ones_like(self.outLambda, device=device)
    # ----
    # physical screen bounds
    self.MinX = -self.HalfPhysSize[0]
    self.MinY = -self.HalfPhysSize[1]
    self.MaxX = self.HalfPhysSize[0]
    self.MaxY = self.HalfPhysSize[1]
    self.ImagePlaneDepth = self.ViewerDistance
    # current viewpoint origin (updated by updateValues)
    self.OriginX = observerPositions[0, 0]
    self.OriginY = observerPositions[0, 1]
    self.updateFlag = False
    # physical positions of the current raster (set by updateRaster)
    self.posX = 0
    self.posY = 0
    self.posZ = 0
def updateValues(self, viewNr):
    """Switch the current viewpoint/projector to observer index viewNr."""
    position = self.observerPositions[viewNr, :]
    self.OriginX = position[0]
    self.OriginY = position[1]
    # projector position is detached so it does not take part in autograd
    self.ProjectorPosition = position.reshape(1, 1, 1, 3).detach()
    self.updateFlag = True
def updateRaster(self, raster):
    """Map raster pixel coordinates to physical positions on the image plane."""
    # normalized coordinates in [0, 1]; y is flipped (raster row 0 = top)
    fracX = raster[:, :, :, 0] / self.ProjectorResolution[0]
    fracY = 1 - raster[:, :, :, 1] / self.ProjectorResolution[1]
    self.posX = self.MinX + fracX * (self.MaxX - self.MinX)
    self.posY = self.MinY + fracY * (self.MaxY - self.MinY)
    self.posZ = self.ImagePlaneDepth
def GenerateRay(self, raster):
    """Generate per-pixel ray origins/directions (currently disabled).

    NOTE(review): the unconditional ``return 0`` below short-circuits this
    method, so everything after it is dead code. It also references
    ``self.z0``, which is commented out in __init__ — presumably this
    non-pinhole path was switched off deliberately; confirm before
    re-enabling.
    """
    return 0
    lambdaX = -1.0 + 2.0 * raster[:, :, :, 0] / self.ProjectorResolution[0]
    lambdaY = 1.0 - 2.0 * raster[:, :, :, 1] / self.ProjectorResolution[1]
    # screen location
    x0 = lambdaX * self.HalfPhysSize[0]
    y0 = lambdaY * self.HalfPhysSize[1]
    z0 = self.z0.clone().detach()
    xyz = torch.stack((x0, y0, z0), -1)
    # viewer location
    locOri = self.ProjectorPosition - xyz
    locLineOri = -1 * xyz
    X = self.FindMaxOnLine(locOri, locLineOri)
    # this must be then pixel locaion
    self.ori[:, :, :, 0] = X
    self.ori[:, :, :, 1] = 0
    self.ori[:, :, :, 2] = 0
    self.direc[:, :, :, 0] = x0 - X
    self.direc[:, :, :, 1] = y0
    self.direc[:, :, :, 2] = z0
    return self.ori, self.direc
def Diffusion(self, dirToProj, dirToEye):
    """Diffusion weight between the projector and eye directions.

    Computes exp(-k0*dRho^2 [- k1*dEta^2]) where rho = x / |(y, z)| and
    eta = y / z are component ratios of each direction, and [k0, k1] =
    self.DiffusionPower. The second term is skipped when k1 <= 0.
    """
    dA = dirToProj
    dB = dirToEye
    # sign of the z component keeps rho consistent for opposing directions
    signA = sign(dA[:, :, :, 2])
    signB = sign(dB[:, :, :, 2])
    # small epsilon guards against division by zero
    rhoA = signA * dA[:, :, :, 0] / torch.sqrt(
        dA[:, :, :, 1] * dA[:, :, :, 1] + dA[:, :, :, 2] * dA[:, :, :, 2] + 1e-12)
    rhoB = signB * dB[:, :, :, 0] / torch.sqrt(
        dB[:, :, :, 1] * dB[:, :, :, 1] + dB[:, :, :, 2] * dB[:, :, :, 2] + 1e-12)
    diffRho = rhoA - rhoB
    expArg = -self.DiffusionPower[0] * diffRho * diffRho
    if self.DiffusionPower[1] > 0:
        etaA = dA[:, :, :, 1] / (dA[:, :, :, 2] + 1e-12)
        etaB = dB[:, :, :, 1] / (dB[:, :, :, 2] + 1e-12)
        diffEta = etaA - etaB
        # diffRho = np.arctan(diffRho)
        # diffEta = np.arctan(diffEta)
        expArg -= self.DiffusionPower[1] * diffEta * diffEta
    return torch.exp(expArg).float()
    def FindMaxOnLine(self, rayOri, lineOri):
        """Find the line parameter where the ray's rho is best matched.

        Solves, per pixel, for the parameter lambda along the line
        ``lineOri + lambda * self.d`` whose point reproduces the ray's
        rho ratio (signed x over (y, z) magnitude). Two candidate roots
        (lambdaA/lambdaB) are computed in closed form; the one whose
        resulting z-sign matches the line origin's is preferred, and any
        still-unassigned pixels fall back to whichever candidate has the
        closer rho. Returns a tensor of lambdas shaped like the inputs'
        leading dimensions.

        NOTE(review): the closed form below assumes d == [1, 0, 0]; the
        commented-out block is the general-d derivation.
        """
        signRay = sign(rayOri[:, :, :, 2])
        signLine = sign(lineOri[:, :, :, 2])
        rho = signRay * rayOri[:, :, :, 0] / torch.sqrt(
            rayOri[:, :, :, 1] * rayOri[:, :, :, 1] + rayOri[:, :, :, 2] * rayOri[:, :, :, 2])
        e = lineOri
        # use case for d != [1,0,0]
        # a = d[:, :, 0] * d[:, :, 0] - rho * rho * (d[:, :, 1] * d[:, :, 1] + d[:, :, 2] * d[:, :, 2])
        #
        # b = d[:, :, 0] * e[:, :, 0] - rho * rho * (d[:, :, 1] * e[:, :, 1] + d[:, :, 2] * e[:, :, 2])
        #
        # c = e[:, :, 0] * e[:, :, 0] - rho * rho * (e[:, :, 1] * e[:, :, 1] + e[:, :, 2] * e[:, :, 2])
        #
        # D = b * b - a * c
        #
        # # sometimes D can be less than zero..
        # DD = D<0
        # # D[D<0] = 0
        # sqrtD = torch.sqrt(D)
        # lambdaA = (-b + sqrtD) / a
        # lambdaB = (-b - sqrtD) / a
        lD = rho * torch.sqrt(e[:, :, :, 1] * e[:, :, :, 1] + e[:, :, :, 2] * e[:, :, :, 2])
        lambdaA = -e[:, :, :, 0] + lD
        lambdaB = -e[:, :, :, 0] - lD
        lambdaA = lambdaA.unsqueeze(-1)
        lambdaB = lambdaB.unsqueeze(-1)
        # Candidate points on the line for each root.
        posA = e + lambdaA * self.d
        posB = e + lambdaB * self.d
        signA = sign(posA[:, :, :, 2])
        signB = sign(posB[:, :, :, 2])
        # Select the root whose z-sign agrees with the line origin's.
        firstCondition = signA == signLine
        secondCondition = signB != signLine
        thirdCondition = signA != signLine
        fourthCondition = signB == signLine
        lambdaA = torch.squeeze(lambdaA, -1)
        lambdaB = torch.squeeze(lambdaB, -1)
        outLambda = self.outLambda.clone()
        # Boolean masks multiplied together act as logical AND.
        outLambda[firstCondition * secondCondition] = lambdaA[firstCondition * secondCondition].float()
        outLambda[thirdCondition * fourthCondition] = lambdaB[thirdCondition * fourthCondition].float()
        stillZeros = outLambda == 0
        rhoA = signA * posA[:, :, :, 0] / torch.sqrt(
            posA[:, :, :, 1] * posA[:, :, :, 1] + posA[:, :, :, 2] * posA[:, :, :, 2])
        rhoB = signB * posB[:, :, :, 0] / torch.sqrt(
            posB[:, :, :, 1] * posB[:, :, :, 1] + posB[:, :, :, 2] * posB[:, :, :, 2])
        # Tie-break the remaining pixels by the closer rho value.
        fifthCondition = torch.abs(rhoA - rho) < torch.abs(rhoB - rho)
        sixthCondition = ~fifthCondition
        outLambda[fifthCondition * stillZeros] = lambdaA[fifthCondition * stillZeros].float()
        outLambda[sixthCondition * stillZeros] = lambdaB[sixthCondition * stillZeros].float()
        return outLambda
    def GenerateRayPinHole(self):
        """Build pin-hole camera rays from the cached observer position.

        Requires updateValues() to have run (updateFlag). Origins sit at
        (OriginX, OriginY, 0); directions point toward the image-plane
        positions (posX, posY, posZ) precomputed by updateRaster().

        :return: (origins, directions) tensors, written in place.
        :raises RuntimeError: if updateValues() was never called.
        """
        if not self.updateFlag:
            raise RuntimeError("Run display.updateValues first")
        self.ori[:, :, :, 0] = self.OriginX
        self.ori[:, :, :, 1] = self.OriginY
        self.ori[:, :, :, 2] = 0
        self.direc[:, :, :, 0] = self.posX - self.OriginX
        self.direc[:, :, :, 1] = self.posY - self.OriginY
        self.direc[:, :, :, 2] = self.posZ
        return self.ori, self.direc
class Sampler:
    """Generates a sub-pixel sample raster for supersampled rendering.

    Builds a (primaryResolution^2, height, width, 2) grid of (x, y)
    raster coordinates, one layer per sub-pixel offset.
    """
    def __init__(self, height=600, width=800, primaryResolution=3):
        self.secondaryRes = 1
        self.primaryResolution = primaryResolution
        with torch.no_grad():
            xs = torch.arange(0, width, device=device, dtype=torch.float32)
            ys = torch.arange(0, height, device=device, dtype=torch.float32)
            # NOTE(review): relies on torch.meshgrid's historical default
            # 'ij' indexing; newer torch versions warn unless indexing=
            # is passed explicitly -- confirm the torch version in use.
            yv, xv = torch.meshgrid(ys, xs)
            self.raster = torch.stack((xv, yv), dim=2).unsqueeze(0).repeat(
                self.primaryResolution * self.primaryResolution, 1, 1, 1)
            # Add one (dx, dy) sub-pixel offset per layer, centered in
            # each cell of a primaryResolution x primaryResolution grid.
            for index in range(0, self.primaryResolution * self.primaryResolution):
                indY1 = index % self.primaryResolution
                indX1 = index // self.primaryResolution % self.primaryResolution
                dx1 = (0.5 + indX1) / self.primaryResolution
                dy1 = (0.5 + indY1) / self.primaryResolution
                self.raster[index, :, :, 0] += dx1
                self.raster[index, :, :, 1] += dy1
            # Keep samples inside the raster bounds.
            self.raster[:, :, :, 0] = torch.clamp(self.raster[:, :, :, 0], 0, width)
            self.raster[:, :, :, 1] = torch.clamp(self.raster[:, :, :, 1], 0, height)
    def CurrentSample(self):
        # Detached view of the precomputed sample grid (no autograd).
        return self.raster.detach()
class Renderer:
    def __init__(self, m_DisplayModel, sampler, projectorPositions, projectorImages, height=600, width=800,
                 primaryResolution=3):
        """Bind a display model, sampler and projector data for rendering.

        Also pushes the sampler's current raster into the display model so
        per-pixel image-plane positions are precomputed once up front.
        """
        self.viewerDistance = m_DisplayModel.ViewerDistance
        self.halfSizeX = m_DisplayModel.HalfPhysSize[0]
        self.halfSizeY = m_DisplayModel.HalfPhysSize[1]
        self.sampler = sampler
        self.m_DisplayModel = m_DisplayModel
        self.projectorImages = projectorImages
        self.projectorPositions = projectorPositions
        self.primaryResolution = primaryResolution
        self.height = height
        self.width = width
        # Precompute image-plane positions from the sampler grid.
        self.m_DisplayModel.updateRaster(self.sampler.CurrentSample())
    def updateProjectorImages(self, projectorImages, projectorPositions=None):
        """Replace the projector images (and optionally their positions).

        Incoming images are permuted from (C, H, W) to (H, W, C) layout.
        NOTE(review): the constructor stores projectorImages without this
        permute -- presumably the constructor already receives (H, W, C)
        data; confirm against callers.
        """
        self.projectorImages = projectorImages.permute(1, 2, 0)
        if projectorPositions is not None:
            self.projectorPositions = projectorPositions
    def projectPixels(self, xyz, ori):
        """Blend all projector images at surface points ``xyz`` seen from ``ori``.

        For each projector, a diffusion weight compares the direction to
        the projector with the direction to the eye; colors are combined
        as a weighted average. Returns a float tensor of shape
        (primaryResolution^2, height, width, 3).
        """
        weightSum = torch.zeros((1, self.height, self.width), device=device).unsqueeze(-1)
        colorSum = torch.zeros((self.primaryResolution * self.primaryResolution, self.height, self.width, 3),
                               device=device)
        # xProj = torch.arange(0, 800).unsqueeze(0).unsqueeze(0).repeat(1, 600, 1).to(device)
        # yProj = torch.arange(0, 600).unsqueeze(0).unsqueeze(-1).repeat(1, 1, 800).to(device)
        # Projector images are packed side by side along the channel
        # axis, 3 channels (RGB) per projector.
        for projInd in range(self.projectorImages.size(2) // 3):
            projPos = self.projectorPositions[projInd]
            dirToProj = projPos - xyz
            dirToEye = ori - xyz
            weight = self.m_DisplayModel.Diffusion(dirToProj=dirToProj, dirToEye=dirToEye)
            # print(self.projectorImages[:,:,:].size())
            projColor = self.projectorImages[:, :, projInd * 3:(projInd + 1) * 3]
            # projColor = self.projectorImages[yProj, xProj, projInd * 3:(projInd + 1) * 3]
            weight = weight.unsqueeze(-1)
            # Discard NaN, negligible and blown-up weights.
            weight[torch.isnan(weight)] = 0
            weight[weight < 0.00001] = 0
            weight[weight > 1e12] = 0
            # r = torch.cuda.memory_reserved(0)
            # a = torch.cuda.memory_allocated(0)
            # f = r - a  # free inside reserved
            # print(f)
            colorSum = colorSum + weight * projColor
            weightSum = weightSum + weight
        # Weighted average; 1e-12 avoids division by zero.
        tmp = colorSum / (weightSum + 1e-12)
        # Zero out pixels that no projector meaningfully covers.
        tmp[(weightSum < 0.00001).expand(-1, -1, -1, 3)] = 0
        return tmp.float()
def render(self, viewNr):
self.m_DisplayModel.updateValues(viewNr)
ori, direc = self.m_DisplayModel.GenerateRayPinHole()
z0 = self.viewerDistance
x0 = ori[:, :, :, 0] + (z0 - ori[:, :, :, 2]) * | |
E.isModellingError(x,y) :
# previously modelled as 0, we fix the error
E.delModellingError(x,y);
else :
E.delUnmodelledError(x,y);
E.cover(x,y);
else :
if E.isCovered(x,y) :
if not E.isModellingError(x,y) :
E.addModellingError(x,y);
else :
E.addModellingError(x,y);
E.cover(x,y)
if config.optModelZeroes == True :
# model non-shortcuts
for i_idx in range(st.numSpokes) :
i = st.sNodes[i_idx];
for j_idx in range(i_idx+1, st.numSpokes) :
j = st.sNodes[j_idx];
if not E.isExcluded(i,j) :
# only if (i,j) is not already modelled perfectly
if not E.isCovered(i,j) :
# edge not yet modelled
if G.hasEdge(i,j) :
# oops, there is an edge, but we say there aint
E.addModellingError(i,j);
#else :
# there is no edge, so we're good
E.cover(i,j);
#else :
# edge is modelled
#if G.hasEdge(i,j) and E.isModellingError(i,j) :
# model incorrect in saying there is no edge - no change
#if G.hasEdge(i,j) and not E.isModellingError(i,j) :
# model correct in saying there is an edge, no change
# ...
return;
# Encoded Size of a bi-partite core
def LbiPartiteCore(bc, M, G, E) :
    """Return the encoded size (in bits) of a bi-partite core structure.

    Also updates the error matrix E by covering the structure first.
    """
    # Fold the structure into the error/cover bookkeeping before costing.
    coverBiPartiteCore(G, E, bc)
    n_left = bc.numLeftNodes
    n_right = bc.numRightNodes
    # sizes of both parts, then the node ids of each part
    return (LN(n_left) + LN(n_right)
            + LU(G.numNodes, n_left)
            + LU(G.numNodes - n_left, n_right))
def coverBiPartiteCore(G, E, bc) :
    """Update error matrix E to reflect modelling a bi-partite core.

    Cells between the left and right parts are modelled as 1; cells
    within each part are modelled as 0. Existing exclusions are
    respected; previously covered cells have their error flags
    adjusted rather than re-covered blindly.
    """
    # 1. fill in the 1s between the parts
    for i in bc.lNodes :
        for j in bc.rNodes :
            if not E.isExcluded(i,j) :
                # only if (i,j) is not already modelled perfectly
                if G.hasEdge(i,j) :
                    # there is an edge
                    if E.isCovered(i,j) :
                        if E.isModellingError(i,j) :
                            # model says 0, we fix to 1
                            E.delModellingError(i,j);
                    else :
                        # model didnt say anything, we fix it
                        E.delUnmodelledError(i,j);
                        E.cover(i,j);
                else :
                    # there is no edge
                    if E.isCovered(i,j) :
                        # but the cell is modelled
                        if not E.isModellingError(i,j) :
                            E.addModellingError(i,j); # we make a boo-boo
                    else :
                        # the cell is not modelled, yet
                        E.addModellingError(i,j);
                        E.cover(i,j);
    # 2. fill in the 0s in left part
    for i_idx in range(len(bc.lNodes)-1) :
        i = bc.lNodes[i_idx];
        for j_idx in range(i_idx+1,len(bc.lNodes)) :
            j = bc.lNodes[j_idx];
            if not E.isExcluded(i,j) and not E.isCovered(i,j) :
                # only if (i,j) is not covered or already modelled perfectly
                if E.isUnmodelledError(i,j) :
                    # edge exists!
                    E.delUnmodelledError(i,j); # we now model this cell
                    E.addModellingError(i,j); # but do so wrongly
                E.cover(i,j);
    # 3. fill in the 0s in right part
    for i_idx in range(len(bc.rNodes)-1) :
        i = bc.rNodes[i_idx];
        for j_idx in range(i_idx+1,len(bc.rNodes)) :
            j = bc.rNodes[j_idx];
            if not E.isExcluded(i,j) and not E.isCovered(i,j) :
                # only if (i,j) is not covered or already modelled perfectly
                if E.isUnmodelledError(i,j) :
                    # edge exists!
                    E.delUnmodelledError(i,j); # we now model this cell
                    E.addModellingError(i,j); # but do so wrongly
                E.cover(i,j);
    return;
# Encoded Size of a near bi-partite core
def LnearBiPartiteCore(nb, M, G, E) :
    """Return the encoded size (in bits) of a near bi-partite core.

    Covers the structure in the error matrix E first. The cost includes
    the sizes and ids of both node sets plus, when any cells lie between
    them, the edge density and the actual edges.
    """
    zeros, ones = coverNearBiPartiteCore(G, E, nb)
    cells = zeros + ones
    # sizes of sets A and B
    cost = LN(nb.numLeftNodes) + LN(nb.numRightNodes)
    # node ids of sets A and B
    cost += LU(G.numNodes, nb.numLeftNodes)
    cost += LU(G.numNodes - nb.numLeftNodes, nb.numRightNodes)
    if cells > 0 :
        # probability of a 1 between A and B, then the actual edges
        cost += log(cells, 2)
        cost += LnU(cells, ones)
    return cost
def coverNearBiPartiteCore(G, E, nb) :
    """Update error matrix E for a near bi-partite core; count cell values.

    Cells between the parts are covered AND excluded (their content is
    encoded explicitly by the caller), so edges there stop counting as
    errors. Cells within each part are modelled as 0, as in the exact
    bi-partite case.

    :return: (cnt0, cnt1) -- number of 0-cells and 1-cells between parts.
    """
    # first encode the edges between the parts
    cnt0 = 0;
    cnt1 = 0;
    for i_idx in range(nb.numLeftNodes) :
        i = nb.lNodes[i_idx];
        for j_idx in range(nb.numRightNodes) :
            j = nb.rNodes[j_idx];
            if not E.isExcluded(i,j) :
                # only if (i,j) is not already modelled perfectly
                if not E.isCovered(i,j) :
                    # edge is not modelled yet
                    if G.hasEdge(i,j) :
                        # yet there is a real edge, so now we undo an error
                        E.delUnmodelledError(i,j);
                    E.coverAndExclude(i,j);
                else :
                    # edge is already modelled
                    if E.isModellingError(i,j) :
                        # but wrongly, we undo that error
                        E.delModellingError(i,j);
                    E.exclude(i,j)
            # Tally 1s and 0s between the parts for the density encoding.
            if G.hasEdge(i,j) :
                cnt1 += 1;
            else:
                cnt0 += 1;
    # 2. fill in the 0s in left part
    for i_idx in range(len(nb.lNodes)-1) :
        i = nb.lNodes[i_idx];
        for j_idx in range(i_idx+1,len(nb.lNodes)) :
            j = nb.lNodes[j_idx];
            if not E.isExcluded(i,j) and not E.isCovered(i,j) :
                # only if (i,j) is not covered or already modelled perfectly
                if E.isUnmodelledError(i,j) :
                    # edge exists!
                    E.delUnmodelledError(i,j); # we now model this cell
                    E.addModellingError(i,j); # but do so wrongly
                E.cover(i,j);
    # 3. fill in the 0s in right part
    for i_idx in range(len(nb.rNodes)-1) :
        i = nb.rNodes[i_idx];
        for j_idx in range(i_idx+1,len(nb.rNodes)) :
            j = nb.rNodes[j_idx];
            if not E.isExcluded(i,j) and not E.isCovered(i,j) :
                # only if (i,j) is not covered or already modelled perfectly
                if E.isUnmodelledError(i,j) :
                    # edge exists!
                    E.delUnmodelledError(i,j); # we now model this cell
                    E.addModellingError(i,j); # but do so wrongly
                E.cover(i,j);
    return (cnt0,cnt1);
# Encoded Size of a jellyfish structure
def LjellyFish(jf, M, G, E) :
    """Return the encoded size (in bits) of a jellyfish structure.

    Updates the error matrix E by covering the structure first.
    """
    coverJellyFish(G, E, jf)
    parts = [
        LN(jf.numCores),                               # number of core nodes
        LU(G.numNodes, jf.numCores),                   # core node ids
        LN(jf.numSpokeSum),                            # total number of spokes
        LC(jf.numSpokeSum, jf.numCores),               # spokes per core node
        LU(G.numNodes - jf.numCores, jf.numSpokeSum),  # spoke ids (-no- overlap between sets!)
    ]
    return sum(parts)
def coverJellyFish(G, E, jf) :
    """Update error matrix E to reflect modelling a jellyfish structure.

    Models 1s among the core nodes (a clique), 1s between each core node
    and its spokes, and -- when config.optModelZeroes is set -- 0s among
    the spokes of each core node.
    """
    # first link up the nodes in the core
    for i_idx in range(len(jf.cNodes)) :
        i = jf.cNodes[i_idx];
        for j_idx in range(i_idx+1,len(jf.cNodes)) :
            j = jf.cNodes[j_idx];
            if not E.isExcluded(i,j) :
                # only if (i,j) is not already modelled perfectly
                if G.hasEdge(i,j) :
                    # there is an edge
                    if E.isCovered(i,j) :
                        if E.isModellingError(i,j) :
                            E.delModellingError(i,j); # model said 0, but we say 1
                    else :
                        # edge is there, but not covered, we fix it!
                        E.delUnmodelledError(i,j);
                        E.cover(i,j);
                else :
                    # there is no edge
                    if E.isCovered(i,j) :
                        if not E.isModellingError(i,j) :
                            E.addModellingError(i,j); # model said 0, we say 1
                    else :
                        E.addModellingError(i,j);
                        E.cover(i,j);
    # 2. link up the core nodes up to their respective spokes
    for i_idx in range(len(jf.cNodes)) :
        i = jf.cNodes[i_idx];
        for j_idx in range(len(jf.sNodes[i_idx])) :
            j = jf.sNodes[i_idx][j_idx];
            if not E.isExcluded(i,j) :
                # only if (i,j) is not already modelled perfectly
                if G.hasEdge(i,j) :
                    # there is an edge
                    if E.isCovered(i,j) :
                        if E.isModellingError(i,j) :
                            E.delModellingError(i,j); # model said 0, we fix to 1
                    else :
                        # edge is there, but not covered, we fix it
                        E.delUnmodelledError(i,j);
                        E.cover(i,j);
                else :
                    # there is no edge
                    if E.isCovered(i,j) :
                        if not E.isModellingError(i,j) :
                            E.addModellingError(i,j); # model said 0, but we say 1
                    else :
                        E.addModellingError(i,j);
                        E.cover(i,j);
    if config.optModelZeroes == True :
        # 3. model that the spokes within a set are not connected
        # !!! code can be made more efficient, by incorporating it in previous loop
        for i_idx in range(len(jf.cNodes)) :
            for j_idx in range(len(jf.sNodes[i_idx])-1) :
                j = jf.sNodes[i_idx][j_idx];
                for k_idx in range(j_idx+1,len(jf.sNodes[i_idx])) :
                    k = jf.sNodes[i_idx][k_idx];
                    if not E.isExcluded(j,k) :
                        # only if (i,j) is not already modelled perfectly
                        #if E.isModelled(j,k) :
                        # we don't change previous modelling, but
                        if not E.isModelled(j,k) :
                            # cell not yet modelled, and should be a 0
                            if G.hasEdge(j,k) :
                                # but, it has a 1, change it to modelling error
                                E.delUnmodelledError(j,k);
                                E.addModellingError(j,k);
                            E.cover(j,k);
    return;
# Encoded Size of a core periphery
def LcorePeriphery(cp, M, G, E) :
    """Return the encoded size (in bits) of a core-periphery structure.

    Covers the structure in the error matrix E before costing.
    """
    coverCorePeriphery(G, E, cp)
    n = G.numNodes
    cost = LN(cp.numCores) + LN(cp.numSpokes)        # set sizes
    cost += cp.numCores * log(n, 2)                  # identify core-nodes
    cost += cp.numSpokes * log(n - cp.numCores, 2)   # identify spoke-nodes
    return cost
# check whether ok
def coverCorePeriphery(G, E, cp) :
    """Mark every (core, spoke) cell as modelled-as-1 in error matrix E.

    For each not-yet-modelled pair: a real edge clears its unmodelled
    error, a missing edge becomes a modelling error; either way the
    cell is then covered. Already-modelled cells are left untouched.
    """
    for i, j in ((c, s) for c in cp.cNodes for s in cp.sNodes):
        if E.isModelled(i, j):
            continue
        if G.hasEdge(i, j):
            E.delUnmodelledError(i, j)
        else:
            E.addModellingError(i, j)
        E.cover(i, j)
# Encoded Size of a core periphery (a bit smarter)
def LcorePeripheryA(cp, M, G, E) :
    """Return the encoded size (in bits) of a core-periphery structure,
    identifying the node sets with the LU encoding.

    Unlike LcorePeriphery, this variant does not touch the error matrix E.
    """
    n_core = cp.numCoreNodes
    n_spoke = cp.numSpokes
    return (LN(n_core) + LN(n_spoke)             # set sizes
            + LU(G.numNodes, n_core)             # identify core-nodes
            + LU(G.numNodes - n_core, n_spoke))  # identify spoke-nodes
### | |
# -*- coding: utf-8 -*-
from .. import config
from .utils import *
from .._constants import *
import theano
import theano.tensor as tt
import numpy as np
from scipy.linalg import block_diag as scipy_block_diag
import theano.tensor.slinalg as sla
import scipy
__all__ = ["lazy_math", "greedy_math", "lazy_linalg", "greedy_linalg"]
# Cholesky solve
# Pre-built theano triangular-solve ops, reused by _cho_solve below.
_solve_lower = sla.Solve(A_structure="lower_triangular", lower=True)
_solve_upper = sla.Solve(A_structure="upper_triangular", lower=False)
def _cho_solve(cho_A, b):
    """Solve A x = b given the lower Cholesky factor ``cho_A`` of A.

    Two triangular solves: L y = b, then L^T x = y.
    """
    return _solve_upper(tt.transpose(cho_A), _solve_lower(cho_A, b))
def _get_covariance(math, linalg, C=None, cho_C=None, N=None):
    """A container for covariance matrices.

    Args:
        C (scalar, vector, or matrix, optional): The covariance.
            Defaults to None.
        cho_C (matrix, optional): The lower Cholesky factorization of
            the covariance. Defaults to None.
        N (int, optional): The number of rows/columns in the covariance
            matrix, required if ``C`` is a scalar. Defaults to None.

    Returns:
        Tuple ``(value, cholesky, inverse, lndet, kind, N)`` where
        ``kind`` is one of "cholesky", "scalar", "vector", "matrix".

    Raises:
        ValueError: if neither ``C`` nor ``cho_C`` is provided.
    """
    # User provided the Cholesky factorization
    if cho_C is not None:
        cholesky = math.cast(cho_C)
        value = math.dot(cholesky, math.transpose(cholesky))
        inverse = linalg.cho_solve(cholesky, math.eye(cholesky.shape[0]))
        # log|C| = 2 * sum(log(diag(L)))
        lndet = 2 * math.sum(math.log(math.diag(cholesky)))
        kind = "cholesky"
        N = cho_C.shape[0]
    # User provided the covariance as a scalar, vector, or matrix
    elif C is not None:
        C = math.cast(C)
        if hasattr(C, "ndim"):
            if C.ndim == 0:
                # 0-d array: treat as a scalar (isotropic) covariance.
                assert N is not None, "Please provide a matrix size `N`."
                cholesky = math.sqrt(C)
                inverse = math.cast(1.0 / C)
                lndet = math.cast(N * math.log(C))
                value = C
                kind = "scalar"
            elif C.ndim == 1:
                # Diagonal covariance given as a vector.
                cholesky = math.sqrt(C)
                inverse = 1.0 / C
                lndet = math.sum(math.log(C))
                value = C
                kind = "vector"
                N = C.shape[0]
            else:
                # Full covariance matrix.
                cholesky = math.cholesky(C)
                inverse = linalg.cho_solve(cholesky, math.eye(C.shape[0]))
                lndet = 2 * math.sum(math.log(math.diag(cholesky)))
                value = C
                kind = "matrix"
                N = C.shape[0]
        # Assume it's a scalar
        else:
            assert N is not None, "Please provide a matrix size `N`."
            cholesky = math.sqrt(C)
            inverse = math.cast(1.0 / C)
            lndet = math.cast(N * math.log(C))
            value = C
            kind = "scalar"
    # ?!
    else:
        raise ValueError(
            "Either the covariance or its Cholesky factorization must be provided."
        )
    return value, cholesky, inverse, lndet, kind, N
class MathType(type):
    """Wrapper for theano/numpy functions.

    A metaclass: classes built with it dispatch math operations either
    to theano tensors (when ``cls.lazy`` is True) or to numpy/scipy
    (when False). Any attribute not defined here falls through to the
    ``tt`` or ``np`` namespace via ``__getattr__``.
    """
    def cholesky(cls, *args, **kwargs):
        # scipy defaults to the upper factor, so pass lower=True to
        # match theano's sla.cholesky.
        if cls.lazy:
            return sla.cholesky(*args, **kwargs)
        else:
            return scipy.linalg.cholesky(*args, **kwargs, lower=True)
    def atleast_2d(cls, arg):
        if cls.lazy:
            # Broadcasting against a (1, 1) tensor promotes to >= 2 dims.
            return arg * tt.ones((1, 1))
        else:
            return np.atleast_2d(arg)
    def vectorize(cls, *args):
        """
        Vectorize all ``args`` so that they have the same length
        along the first axis.

        TODO: Add error catching if the dimensions don't agree.
        """
        if cls.lazy:
            args = [arg * tt.ones(1) for arg in args]
            size = tt.max([arg.shape[0] for arg in args])
            args = [tt.repeat(arg, size // arg.shape[0], 0) for arg in args]
        else:
            args = [np.atleast_1d(arg) for arg in args]
            size = np.max([arg.shape[0] for arg in args])
            # Broadcast each arg up to `size` along axis 0, leaving the
            # trailing dimensions untouched.
            args = tuple(
                [
                    arg
                    * np.ones(
                        (size,) + tuple(np.ones(len(arg.shape) - 1, dtype=int))
                    )
                    for arg in args
                ]
            )
        if len(args) == 1:
            return args[0]
        else:
            return args
    def cross(cls, x, y):
        """Cross product of two 3-vectors.

        Based on ``https://github.com/Theano/Theano/pull/3008``
        """
        if cls.lazy:
            # Levi-Civita tensor contraction implements the cross product.
            eijk = np.zeros((3, 3, 3))
            eijk[0, 1, 2] = eijk[1, 2, 0] = eijk[2, 0, 1] = 1
            eijk[0, 2, 1] = eijk[2, 1, 0] = eijk[1, 0, 2] = -1
            return tt.as_tensor_variable(tt.dot(tt.dot(eijk, y), x))
        else:
            return np.cross(x, y)
    def cast(cls, *args):
        # Cast to tensors (lazy) or floatX numpy arrays (greedy).
        if cls.lazy:
            return cls.to_tensor(*args)
        else:
            if len(args) == 1:
                return np.array(args[0], dtype=tt.config.floatX)
            else:
                return [np.array(arg, dtype=tt.config.floatX) for arg in args]
    def to_array_or_tensor(cls, x):
        if cls.lazy:
            return tt.as_tensor_variable(x)
        else:
            return np.array(x)
    def block_diag(cls, *mats):
        # Assemble a block-diagonal matrix from the given square blocks.
        if cls.lazy:
            N = [mat.shape[0] for mat in mats]
            Nsum = tt.sum(N)
            res = tt.zeros((Nsum, Nsum), dtype=tt.config.floatX)
            n = 0
            for mat in mats:
                inds = slice(n, n + mat.shape[0])
                res = tt.set_subtensor(res[tuple((inds, inds))], mat)
                n += mat.shape[0]
            return res
        else:
            return scipy_block_diag(*mats)
    def to_tensor(cls, *args):
        """Convert all ``args`` to Theano tensor variables.

        Converts to tensor regardless of whether `cls.lazy` is True or False.
        """
        if len(args) == 1:
            return tt.as_tensor_variable(args[0]).astype(tt.config.floatX)
        else:
            return [
                tt.as_tensor_variable(arg).astype(tt.config.floatX)
                for arg in args
            ]
    def __getattr__(cls, attr):
        # Fall back to the raw theano/numpy namespaces.
        if cls.lazy:
            return getattr(tt, attr)
        else:
            return getattr(np, attr)
class LinAlgType(type):
"""Linear algebra operations."""
    @autocompile
    def cho_solve(self, cho_A, b):
        # Solve A x = b given A's lower Cholesky factor (two triangular solves).
        return _cho_solve(cho_A, b)
    @autocompile
    def solve(self, X, flux, cho_C, mu, LInv):
        """
        Compute the maximum a posteriori (MAP) prediction for the
        spherical harmonic coefficients of a map given a flux timeseries.

        Args:
            X (matrix): The flux design matrix.
            flux (array): The flux timeseries.
            cho_C (scalar/vector/matrix): The lower cholesky factorization
                of the data covariance.
            mu (array): The prior mean of the spherical harmonic coefficients.
            LInv (scalar/vector/matrix): The inverse prior covariance of the
                spherical harmonic coefficients.

        Returns:
            The vector of spherical harmonic coefficients corresponding to the
            MAP solution and the Cholesky factorization of the corresponding
            covariance matrix.
        """
        # NOTE(review): first parameter is named ``self`` here but ``cls``
        # in the sibling methods -- consider unifying.
        # TODO: These if statements won't play well with @autocompile!!!
        # Compute C^-1 . X
        if cho_C.ndim == 0:
            CInvX = X / cho_C ** 2
        elif cho_C.ndim == 1:
            CInvX = tt.dot(tt.diag(1 / cho_C ** 2), X)
        else:
            CInvX = _cho_solve(cho_C, X)
        # Compute W = X^T . C^-1 . X + L^-1
        W = tt.dot(tt.transpose(X), CInvX)
        if LInv.ndim == 0:
            # Scalar prior precision: add to the diagonal.
            W = tt.inc_subtensor(
                W[tuple((tt.arange(W.shape[0]), tt.arange(W.shape[0])))], LInv
            )
            LInvmu = mu * LInv
        elif LInv.ndim == 1:
            # Diagonal prior precision: add elementwise to the diagonal.
            W = tt.inc_subtensor(
                W[tuple((tt.arange(W.shape[0]), tt.arange(W.shape[0])))], LInv
            )
            LInvmu = mu * LInv
        else:
            W += LInv
            LInvmu = tt.dot(LInv, mu)
        # Compute the max like y and its covariance matrix
        cho_W = sla.cholesky(W)
        M = _cho_solve(cho_W, tt.transpose(CInvX))
        yhat = tt.dot(M, flux) + _cho_solve(cho_W, LInvmu)
        ycov = _cho_solve(cho_W, tt.eye(cho_W.shape[0]))
        cho_ycov = sla.cholesky(ycov)
        return yhat, cho_ycov
    @autocompile
    def lnlike(cls, X, flux, C, mu, L):
        """
        Compute the log marginal likelihood of the data given a design matrix.

        Args:
            X (matrix): The flux design matrix.
            flux (array): The flux timeseries.
            C (scalar/vector/matrix): The data covariance matrix.
            mu (array): The prior mean of the spherical harmonic coefficients.
            L (scalar/vector/matrix): The prior covariance of the spherical
                harmonic coefficients.

        Returns:
            The log marginal likelihood of the `flux` vector conditioned on
            the design matrix `X`. This is the likelihood marginalized over
            all possible spherical harmonic vectors, which is analytically
            computable for the linear `starry` model.
        """
        # TODO: These if statements won't play well with @autocompile!!!
        # Compute the GP mean
        gp_mu = tt.dot(X, mu)
        # Compute the GP covariance: X L X^T, handling scalar / diagonal /
        # full prior covariance L.
        if L.ndim == 0:
            XLX = tt.dot(X, tt.transpose(X)) * L
        elif L.ndim == 1:
            XLX = tt.dot(tt.dot(X, tt.diag(L)), tt.transpose(X))
        else:
            XLX = tt.dot(tt.dot(X, L), tt.transpose(X))
        # Add the data covariance (scalar/diagonal added on the diagonal).
        if C.ndim == 0 or C.ndim == 1:
            gp_cov = tt.inc_subtensor(
                XLX[tuple((tt.arange(XLX.shape[0]), tt.arange(XLX.shape[0])))],
                C,
            )
        else:
            gp_cov = C + XLX
        cho_gp_cov = sla.cholesky(gp_cov)
        # Compute the marginal likelihood
        N = X.shape[0]
        r = tt.reshape(flux - gp_mu, (-1, 1))
        lnlike = -0.5 * tt.dot(tt.transpose(r), _cho_solve(cho_gp_cov, r))
        # log-determinant term via the Cholesky diagonal.
        lnlike -= tt.sum(tt.log(tt.diag(cho_gp_cov)))
        lnlike -= 0.5 * N * tt.log(2 * np.pi)
        return lnlike[0, 0]
@autocompile
def lnlike_woodbury(cls, X, flux, CInv, mu, LInv, lndetC, lndetL):
"""
Compute the log marginal likelihood of the data given a design matrix
using the Woodbury identity.
Args:
X (matrix): The flux design matrix.
flux (array): The flux timeseries.
CInv (scalar/vector/matrix): The inverse data covariance matrix.
mu (array): The prior mean of the spherical harmonic coefficients.
L (scalar/vector/matrix): The inverse prior covariance of the
spherical harmonic coefficients.
Returns:
The log marginal likelihood of the `flux` vector conditioned on
the design matrix `X`. This is the likelihood marginalized over
all possible spherical harmonic vectors, which is analytically
computable for the linear `starry` model.
"""
# TODO: These if statements won't play well with @autocompile!!!
# Compute the GP mean
gp_mu = tt.dot(X, mu)
# Residual vector
r = tt.reshape(flux - gp_mu, (-1, 1))
# Inverse of GP covariance via Woodbury identity
if CInv.ndim == 0:
U = X * CInv
elif CInv.ndim == 1:
U = tt.dot(tt.diag(CInv), X)
else:
U = tt.dot(CInv, X)
if LInv.ndim == 0:
W = tt.dot(tt.transpose(X), U) + LInv * tt.eye(U.shape[1])
elif LInv.ndim == 1:
W = tt.dot(tt.transpose(X), U) + tt.diag(LInv)
else:
W = tt.dot(tt.transpose(X), U) + LInv
cho_W = sla.cholesky(W)
if CInv.ndim == 0:
SInv = CInv * tt.eye(U.shape[0]) - tt.dot(
U, _cho_solve(cho_W, tt.transpose(U))
)
elif CInv.ndim == 1:
SInv = tt.diag(CInv) - tt.dot(
U, _cho_solve(cho_W, tt.transpose(U))
)
else:
SInv | |
if s_rep is None or not s_rep.is_present() or \
(not n_rep.is_ro() and s_rep.is_partial()):
t_repsFrom.to_be_deleted = True
continue
# If the KCC did not remove t from n!repsFrom, it updates t
self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
# Loop thru connections and add implied repsFrom tuples
# for each NTDSConnection under our local DSA if the
# repsFrom is not already present
for cn_conn in current_dsa.connect_table.values():
s_dsa = self.get_dsa_for_implied_replica(n_rep, cn_conn)
if s_dsa is None:
continue
# Loop thru the existing repsFrom tupples (if any) and
# if we already have a tuple for this connection then
# no need to proceed to add. It will have been changed
# to have the correct attributes above
for t_repsFrom in n_rep.rep_repsFrom:
guidstr = str(t_repsFrom.source_dsa_obj_guid)
if s_dsa is self.get_dsa_by_guidstr(guidstr):
s_dsa = None
break
if s_dsa is None:
continue
# Create a new RepsFromTo and proceed to modify
# it according to specification
t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
# Add to our NC repsFrom as this is newly computed
if t_repsFrom.is_modified():
n_rep.rep_repsFrom.append(t_repsFrom)
if self.readonly:
# Display any to be deleted or modified repsFrom
text = n_rep.dumpstr_to_be_deleted()
if text:
logger.info("TO BE DELETED:\n%s" % text)
text = n_rep.dumpstr_to_be_modified()
if text:
logger.info("TO BE MODIFIED:\n%s" % text)
# Peform deletion from our tables but perform
# no database modification
n_rep.commit_repsFrom(self.samdb, ro=True)
else:
# Commit any modified repsFrom to the NC replica
n_rep.commit_repsFrom(self.samdb)
    def merge_failed_links(self, ping=None):
        """Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.

        The KCC on a writable DC attempts to merge the link and connection
        failure information from bridgehead DCs in its own site to help it
        identify failed bridgehead DCs.

        Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks
        from Bridgeheads"

        :param ping: An oracle of current bridgehead availability
        :return: None
        """
        # 1. Queries every bridgehead server in your site (other than yourself)
        # 2. For every ntDSConnection that references a server in a different
        #    site merge all the failure info
        #
        # XXX - not implemented yet
        if ping is not None:
            # Live connections were requested, but the merge itself is
            # still unimplemented -- make that loudly visible.
            debug.DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
        else:
            DEBUG_FN("skipping merge_failed_links() because it requires "
                     "real network connections\n"
                     "and we weren't asked to --attempt-live-connections")
    def setup_graph(self, part):
        """Set up an intersite graph

        An intersite graph has a Vertex for each site object, a
        MultiEdge for each SiteLink object, and a MutliEdgeSet for
        each siteLinkBridge object (or implied siteLinkBridge). It
        reflects the intersite topology in a slightly more abstract
        graph form.

        Roughly corresponds to MS-ADTS 6.2.2.3.4.3

        :param part: a Partition object
        :returns: an InterSiteGraph object
        """
        # If 'Bridge all site links' is enabled and Win2k3 bridges required
        # is not set
        # NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
        # No documentation for this however, ntdsapi.h appears to have:
        # NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
        # The 0x1002 mask tests both option bits at once.
        bridges_required = self.my_site.site_options & 0x00001002 != 0
        transport_guid = str(self.ip_transport.guid)
        g = setup_graph(part, self.site_table, transport_guid,
                        self.sitelink_table, bridges_required)
        # Optionally verify the graph and/or dump it as a dot file.
        if self.verify or self.dot_file_dir is not None:
            dot_edges = []
            for edge in g.edges:
                for a, b in itertools.combinations(edge.vertices, 2):
                    dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
            verify_properties = ()
            name = 'site_edges_%s' % part.partstr
            verify_and_dot(name, dot_edges, directed=False,
                           label=self.my_dsa_dnstr,
                           properties=verify_properties, debug=DEBUG,
                           verify=self.verify,
                           dot_file_dir=self.dot_file_dir)
        return g
def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
"""Get a bridghead DC for a site.
Part of MS-ADTS 6.2.2.3.4.4
:param site: site object representing for which a bridgehead
DC is desired.
:param part: crossRef for NC to replicate.
:param transport: interSiteTransport object for replication
traffic.
:param partial_ok: True if a DC containing a partial
replica or a full replica will suffice, False if only
a full replica will suffice.
:param detect_failed: True to detect failed DCs and route
replication traffic around them, False to assume no DC
has failed.
:return: dsa object for the bridgehead DC or None
"""
bhs = self.get_all_bridgeheads(site, part, transport,
partial_ok, detect_failed)
if len(bhs) == 0:
debug.DEBUG_MAGENTA("get_bridgehead:\n\tsitedn=%s\n\tbhdn=None" %
site.site_dnstr)
return None
else:
debug.DEBUG_GREEN("get_bridgehead:\n\tsitedn=%s\n\tbhdn=%s" %
(site.site_dnstr, bhs[0].dsa_dnstr))
return bhs[0]
    def get_all_bridgeheads(self, site, part, transport,
                            partial_ok, detect_failed):
        """Get all bridghead DCs on a site satisfying the given criteria

        Part of MS-ADTS 6.2.2.3.4.4

        :param site: site object representing the site for which
            bridgehead DCs are desired.
        :param part: partition for NC to replicate.
        :param transport: interSiteTransport object for
            replication traffic.
        :param partial_ok: True if a DC containing a partial
            replica or a full replica will suffice, False if
            only a full replica will suffice.
        :param detect_failed: True to detect failed DCs and route
            replication traffic around them, FALSE to assume
            no DC has failed.
        :return: list of dsa object for available bridgehead DCs
        """
        bhs = []
        # Only the IP transport is supported by this implementation.
        if transport.name != "IP":
            raise KCCError("get_all_bridgeheads has run into a "
                           "non-IP transport! %r"
                           % (transport.name,))
        DEBUG_FN("get_all_bridgeheads")
        DEBUG_FN(site.rw_dsa_table)
        for dsa in site.rw_dsa_table.values():
            pdnstr = dsa.get_parent_dnstr()
            # IF t!bridgeheadServerListBL has one or more values and
            # t!bridgeheadServerListBL does not contain a reference
            # to the parent object of dc then skip dc
            if ((len(transport.bridgehead_list) != 0 and
                 pdnstr not in transport.bridgehead_list)):
                continue
            # IF dc is in the same site as the local DC
            #    IF a replica of cr!nCName is not in the set of NC replicas
            #    that "should be present" on dc or a partial replica of the
            #    NC "should be present" but partialReplicasOkay = FALSE
            #        Skip dc
            if self.my_site.same_site(dsa):
                needed, ro, partial = part.should_be_present(dsa)
                if not needed or (partial and not partial_ok):
                    continue
                rep = dsa.get_current_replica(part.nc_dnstr)
            # ELSE
            #    IF an NC replica of cr!nCName is not in the set of NC
            #    replicas that "are present" on dc or a partial replica of
            #    the NC "is present" but partialReplicasOkay = FALSE
            #        Skip dc
            else:
                rep = dsa.get_current_replica(part.nc_dnstr)
                if rep is None or (rep.is_partial() and not partial_ok):
                    continue
            # IF AmIRODC() and cr!nCName corresponds to default NC then
            #    Let dsaobj be the nTDSDSA object of the dc
            #    IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
            #        Skip dc
            if self.my_dsa.is_ro() and rep is not None and rep.is_default():
                if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                    continue
            # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
            #    Skip dc
            if self.is_bridgehead_failed(dsa, detect_failed):
                DEBUG("bridgehead is failed")
                continue
            DEBUG_FN("get_all_bridgeheads: dsadn=%s" % dsa.dsa_dnstr)
            bhs.append(dsa)
        # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
        # s!options
        #    SORT bhs such that all GC servers precede DCs that are not GC
        #    servers, and otherwise by ascending objectGUID
        # ELSE
        #    SORT bhs in a random order
        if site.is_random_bridgehead_disabled():
            bhs.sort(sort_dsa_by_gc_and_guid)
        else:
            random.shuffle(bhs)
        debug.DEBUG_YELLOW(bhs)
        return bhs
def is_bridgehead_failed(self, dsa, detect_failed):
"""Determine whether a given DC is known to be in a failed state
:param dsa: the bridgehead to test
:param detect_failed: True to really check, False to assume no failure
:return: True if and only if the DC should be considered failed
Here we DEPART from the pseudo code spec which appears to be
wrong. It says, in full:
/***** BridgeheadDCFailed *****/
/* Determine whether a given DC is known to be in a failed state.
* IN: objectGUID - objectGUID of the DC's nTDSDSA object.
* IN: detectFailedDCs - TRUE if and only failed DC detection is
* enabled.
* RETURNS: TRUE if and only if the DC should be considered to be in a
* failed state.
*/
BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
{
IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
the options attribute of the site settings object for the local
DC's site
RETURN FALSE
ELSEIF a tuple z exists in the kCCFailedLinks or
kCCFailedConnections variables such that z.UUIDDsa =
objectGUID, z.FailureCount > 1, and the current time -
z.TimeFirstFailure > 2 hours
RETURN TRUE
ELSE
RETURN detectFailedDCs
ENDIF
}
where you will see detectFailedDCs is not behaving as
advertised -- it is acting as a default return code in the
event that a failure is not detected, not a switch turning
detection on or off. Elsewhere the documentation seems to
concur with the comment rather than the code.
"""
if not detect_failed:
return False
# NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
# When DETECT_STALE_DISABLED, we can | |
import types
import time
import hashlib
import re
from urlparse import urljoin
from threading import Lock
from externals.reqresp import Request
from framework.core.myexception import FuzzException
from framework.fuzzer.base import BaseFuzzRequest
class FuzzRequest(BaseFuzzRequest, Request):
    """An HTTP request wfuzz can mutate by substituting FUZZ markers.

    Extends the low-level reqresp ``Request`` with wfuzz-specific state
    (recursion level, proxy, baseline flag, description, all-vars mode)
    and with factory methods that derive concrete requests from a seed
    request plus a payload list.
    """
    def __init__(self):
        Request.__init__(self)
        self._rlevel = 0            # recursion depth of this request
        self._proxy = None          # (proxy, type) tuple or None
        self._allvars = None        # 'allvars' / 'allpost' brute-force mode
        self._is_baseline = False   # True for the baseline request
        self._fuzz_methods = False  # True when the HTTP method is fuzzed
        self._description = ""      # human-readable payload description
    # read-only methods for accessing HTTP request information consistently across the codebase
    def fr_headers(self):
        """Return request/response headers as {name: value} dicts."""
        h = dict(request = {}, response = {})
        # raw request headers come back as "Name: value" strings, hence the split
        h['request'] = dict(map(lambda x: x.split(":",1), self.getHeaders()))
        if self.response:
            h['response'] = dict(self.response.getHeaders())
        return h
    def fr_parameters(self):
        """Return GET and POST parameters as {name: value} dicts."""
        p = dict(get = {}, post = {})
        p['get'] = dict(map(lambda x: (x.name, x.value), self.getGETVars()))
        p['post'] = dict(map(lambda x: (x.name, x.value), self.getPOSTVars()))
        return p
    def fr_cookies(self):
        """Return request/response cookies as {name: value} dicts."""
        cc = dict(request = {}, response = {})
        if self['Cookie']:
            c = self['Cookie'].split("; ")
            if c[0]:
                # partition (unlike split) keeps cookie values containing '=' intact
                cc['request'] = dict(map(lambda x:[x[0],x[2]],map(lambda x:x.partition("="), c)))
        if self.response:
            c = self.response.getCookie().split("; ")
            if c[0]:
                cc['response'] = dict(map(lambda x:[x[0],x[2]],map(lambda x:x.partition("="), c)))
        return cc
    def fr_method(self):
        """HTTP method (GET, POST, ...)."""
        return self.method
    def fr_schema(self):
        """URL schema (http/https)."""
        return self.schema
    def fr_host(self):
        """Target host."""
        return self.getHost()
    def fr_url(self):
        """Final URL (after any redirects reqresp followed)."""
        return self.finalUrl
    def fr_redirect_url(self):
        """Originally requested (complete) URL."""
        return self.completeUrl
    def fr_content(self):
        """Response body, or '' when no response is available."""
        return self.response.getContent() if self.response else ""
    def fr_code(self):
        """HTTP status code, or None when no response is available."""
        return self.response.code if self.response else None
    def fr_auth(self):
        """(method, user:pass) authentication tuple."""
        return self.getAuth()
    def fr_follow(self):
        """Whether redirects are followed for this request."""
        return self.followLocation
    def fr_time(self):
        """Total request time as measured by reqresp."""
        return self.totaltime
    # Extra info that wfuzz needs within an HTTP request
    def _get_baseline(self):
        return self._is_baseline
    def _set_baseline(self, bl):
        self._is_baseline = bl
    wf_is_baseline = property( _get_baseline, _set_baseline )
    def wf_allvars_len(self):
        """Number of variables in the currently selected variable set."""
        if self.wf_allvars == "allvars":
            varSET = self.getGETVars()
        elif self.wf_allvars == "allpost":
            varSET = self.getPOSTVars()
        else:
            raise FuzzException(FuzzException.FATAL, "Unknown variable set: " + self.wf_allvars)
        return len(varSET)
    def _get_allvars(self):
        return self._allvars
    def _set_allvars(self, bl):
        # NOTE(review): 'allheaders' is rejected here although
        # from_all_fuzz_request below supports it -- confirm intent.
        if bl is not None and bl not in ['allvars', 'allpost']:
            raise FuzzException(FuzzException.FATAL, "Incorrect all parameters brute forcing type specified, correct values are allvars, allpost or allheaders.")
        self._allvars = bl
    wf_allvars = property( _get_allvars, _set_allvars )
    def _set_rlevel(self, l):
        self._rlevel = l
    def _get_rlevel(self):
        return self._rlevel
    rlevel = property( _get_rlevel, _set_rlevel )
    def _set_fuzz_methods(self, l):
        self._fuzz_methods = l
    def _get_fuzz_methods(self):
        return self._fuzz_methods
    wf_fuzz_methods = property( _get_fuzz_methods, _set_fuzz_methods )
    def _set_description(self, l):
        self._description = l
    def _get_description(self):
        return self._description
    wf_description = property( _get_description, _set_description )
    def _set_proxies(self, l):
        if l:
            prox, ptype = l
            self.setProxy("%s" % prox, ptype if ptype else "HTML")
        self._proxy = l
    def _get_proxies(self):
        return self._proxy
    wf_proxy = property( _get_proxies, _set_proxies )
    # methods wfuzz needs to perform HTTP requests (this might change in the future).
    def to_http_object(self, c):
        """Fill the pycurl handle *c* from this request."""
        return Request.to_pycurl_object(c, self)
    def from_http_object(self, c, bh, bb):
        """Populate the response from a completed pycurl handle."""
        return self.response_from_conn_object(c, bh, bb)
    # methods wfuzz needs for substituting payloads and building dictionaries
    @staticmethod
    def from_seed(seed, payload):
        """Build a concrete request from *seed* by replacing each FUZZ /
        FUZnZ marker (in method, auth, URL or raw request, in that order
        of precedence) with the corresponding entry of *payload*.

        Raises FuzzException when the number of payloads does not match
        the number of distinct markers, or a marker cannot be found.
        """
        rawReq = seed.getAll()
        schema = seed.schema
        method, userpass = seed.getAuth()
        http_method = None
        marker_regex = re.compile("FUZ\d*Z",re.MULTILINE|re.DOTALL)
        # distinct markers anywhere in the raw request...
        fuzz_words = len(set(marker_regex.findall(rawReq)))
        # ...plus one implicit marker for the HTTP method...
        if seed.wf_fuzz_methods:
            fuzz_words += 1
        # ...plus markers hidden in the auth credentials.
        if method:
            fuzz_words += len(set(marker_regex.findall(userpass)))
        if len(payload) != fuzz_words:
            raise FuzzException(FuzzException.FATAL, "FUZZ words and number of payloads do not match!")
        newreq = seed.from_copy()
        rawUrl = newreq.completeUrl
        for payload_pos, payload_content in enumerate(payload, start=1):
            # first marker is plain FUZZ, the rest FUZ2Z, FUZ3Z, ...
            fuzz_word = "FUZ" + str(payload_pos) + "Z" if payload_pos > 1 else "FUZZ"
            if newreq.wf_description:
                newreq.wf_description += " - "
            newreq.wf_description += payload_content
            if seed.wf_fuzz_methods and fuzz_word == "FUZZ":
                http_method = payload_content
            elif method and (userpass.count(fuzz_word)):
                userpass = userpass.replace(fuzz_word, payload_content)
            elif newreq.completeUrl.count(fuzz_word):
                rawUrl = rawUrl.replace(fuzz_word, payload_content)
                # reqresp appends http:// if not indicated in the URL, but if I have a payload with a full URL
                # this messes up everything => http://FUZZ and then http://http://asdkjsakd.com
                if rawUrl[:11] == 'http://http':
                    rawUrl = rawUrl[7:]
            elif rawReq.count(fuzz_word):
                rawReq = rawReq.replace(fuzz_word, payload_content)
            else:
                raise FuzzException(FuzzException.FATAL, "No %s word!" % fuzz_word)
        newreq.parseRequest(rawReq, schema)
        newreq.setUrl(rawUrl)
        if http_method: newreq.method = http_method
        if method != 'None': newreq.setAuth(method, userpass)
        return newreq
    @staticmethod
    def from_baseline(seed):
        """Extract baseline values (FUZZ{value} markers) from *seed* and
        return the corresponding baseline request, or None when the seed
        carries no baseline markers.  Side effect: the braces are removed
        from *seed* itself via re-parsing.
        """
        schema = seed.schema
        rawReq = seed.getAll()
        marker_regex = re.compile("FUZ\d*Z{(.*?)}",re.MULTILINE|re.DOTALL)
        baseline_payload = marker_regex.findall(rawReq)
        # if there is no marker, there is no baseline request
        if len(baseline_payload) == 0:
            return None
        # it is not possible to specify a baseline value for the HTTP method!
        if seed.wf_fuzz_methods:
            baseline_payload = ['GET'] + baseline_payload
        # remove baseline markers from the seed request
        for i in baseline_payload:
            rawReq = rawReq.replace("{" + i + "}", '')
        # re-parse seed without baseline markers
        seed.parseRequest(rawReq, schema)
        if seed.wf_fuzz_methods: seed.method = "FUZZ"
        try:
            baseline_req = FuzzRequest.from_seed(seed, baseline_payload)
        except FuzzException:
            raise FuzzException(FuzzException.FATAL, "You must supply a baseline value for all the FUZZ words.")
        baseline_req.wf_is_baseline = True
        return baseline_req
    @staticmethod
    def from_all_fuzz_request(seed, payload):
        """Generator yielding one request per variable in the selected
        variable set (GET, POST or headers), each with the single payload
        substituted into that variable.
        """
        # no FUZZ keyword allowed
        marker_regex = re.compile("FUZ\d*Z",re.MULTILINE|re.DOTALL)
        if len(marker_regex.findall(seed.getAll())) > 0:
            raise FuzzException(FuzzException.FATAL, "FUZZ words not allowed when using all parameters brute forcing.")
        # only one fuzz payload is allowed using this technique
        if len(payload) > 1:
            raise FuzzException(FuzzException.FATAL, "Only one payload is allowed when fuzzing all parameters!")
        if seed.wf_allvars == "allvars":
            varSET = seed.getGETVars()
        elif seed.wf_allvars == "allpost":
            varSET = seed.getPOSTVars()
        elif seed.wf_allvars == "allheaders":
            varSET = seed.getHeaders()
        else:
            raise FuzzException(FuzzException.FATAL, "Unknown variable set: " + seed.wf_allvars)
        if len(varSET) == 0:
            raise FuzzException(FuzzException.FATAL, "No variables on specified variable set: " + seed.wf_allvars)
        for v in varSET:
            variable = v.name
            payload_content = payload[0]
            copycat = seed.from_copy()
            copycat.wf_description = variable + "=" + payload_content
            try:
                if seed.wf_allvars == "allvars":
                    copycat.setVariableGET(variable, payload_content)
                elif seed.wf_allvars == "allpost":
                    copycat.setVariablePOST(variable, payload_content)
                elif seed.wf_allvars == "allheaders":
                    copycat.addHeader(variable, payload_content)
                else:
                    raise FuzzException(FuzzException.FATAL, "Unknown variable set: " + seed.wf_allvars)
            except TypeError, e:
                raise FuzzException(FuzzException.FATAL, "It is not possible to use all fuzzing with duplicated parameters.")
            yield copycat
    # methods wfuzz needs for creating and converting a fuzz request to other internal objects, ie. fuzz result
    @staticmethod
    def from_fuzzRes(fuzz_res, new_url = None):
        """Rebuild a request from a FuzzResult, optionally retargeted."""
        fr = fuzz_res.history.from_copy()
        fr.wf_description = fuzz_res.description
        fr.rlevel = fuzz_res.rlevel
        if new_url: fr.setUrl(new_url)
        return fr
    def from_copy(self):
        """Deep-ish copy: clone wfuzz state, headers, post data, auth,
        URL and timing into a fresh FuzzRequest.
        """
        newreq = FuzzRequest()
        newreq.rlevel = self.rlevel
        newreq.wf_description = self.wf_description
        newreq.wf_proxy = self.wf_proxy
        newreq.wf_is_baseline = self.wf_is_baseline
        newreq.wf_allvars = self.wf_allvars
        newreq.wf_fuzz_methods = self.wf_fuzz_methods
        for k,v in self.fr_headers()['request'].items():
            newreq.addHeader(k, v)
        pp = self.fr_parameters()['post']
        if pp:
            # value-less parameters are serialized without '='
            newreq.setPostData("&".join(["=".join([n,v]) if v is not None else n for n,v in pp.items()]))
        newreq.setFollowLocation(self.followLocation)
        m, up = self.getAuth()
        newreq.setAuth(m, up)
        newreq.setUrl(self.finalUrl)
        newreq.proxytype = self.proxytype
        newreq.totaltime = self.totaltime
        newreq.schema = self.schema
        if self.wf_fuzz_methods:
            newreq.method = "FUZZ"
        else:
            newreq.method = self.method
        return newreq
    @staticmethod
    def from_parse_options(options):
        """Build the initial seed request from parsed CLI options."""
        fr = FuzzRequest()
        fr.rlevel = 1
        fr.setUrl(options['url'])
        fr.wf_fuzz_methods = options['fuzz_methods']
        if options['auth'][0] is not None:
            fr.setAuth(options['auth'][0],options['auth'][1])
        if options['follow']:
            fr.setFollowLocation(options['follow'])
        if options['postdata']:
            fr.setPostData(options['postdata'])
        if options['head']:
            fr.method="HEAD"
        if options['cookie']:
            fr.addHeader("Cookie", "; ".join(options['cookie']))
        for h,v in options['extraheaders']:
            fr.addHeader(h, v)
        if options['allvars']:
            fr.wf_allvars = options['allvars']
        return fr
class FuzzStats:
    """Mutex-guarded counters describing the progress of a fuzzing session.

    Counters touched by several worker threads are exposed as properties
    whose getters and setters hold ``self.mutex``; the plain attributes
    (``url``, ``seed``, ``total_req``) are written from a single place.
    """
    def __init__(self):
        self.mutex = Lock()
        # Session identity / totals.
        self.url = ""
        self.seed = None
        self.total_req = 0
        # Shared counters -- access only through the properties below.
        self._pending_fuzz = 0
        self._pending_seeds = 0
        self._processed = 0
        self._backfeed = 0
        self._filtered = 0
        self._totaltime = 0
        self.__starttime = 0
        self._cancel = False
    @staticmethod
    def from_requestGenerator(rg):
        """Create a stats object preloaded from a request generator."""
        stats = FuzzStats()
        stats.url = rg.seed.completeUrl
        stats.total_req = rg.count()
        stats.seed = FuzzResult.from_fuzzReq(rg.seed, -1)
        return stats
    def get_stats(self):
        """Snapshot of the session counters as a plain dictionary."""
        snapshot = {}
        snapshot["url"] = self.url
        snapshot["total"] = self.total_req
        snapshot["backfed"] = self.backfeed
        snapshot["Processed"] = self.processed
        snapshot["Pending"] = self.pending_fuzz
        snapshot["filtered"] = self.filtered
        snapshot["Pending_seeds"] = self.pending_seeds
        snapshot["totaltime"] = self.totaltime
        return snapshot
    def get_cancelled(self):
        with self.mutex:
            return self._cancel
    def set_cancelled(self, value):
        with self.mutex:
            self._cancel = value
    cancelled = property(get_cancelled, set_cancelled)
    def get_pend_fuzz(self):
        with self.mutex:
            return self._pending_fuzz
    def set_pend_fuzz(self, value):
        with self.mutex:
            self._pending_fuzz = value
    pending_fuzz = property(get_pend_fuzz, set_pend_fuzz)
    def get_filtered(self):
        with self.mutex:
            return self._filtered
    def set_filtered(self, value):
        with self.mutex:
            self._filtered = value
    filtered = property(get_filtered, set_filtered)
    def get_backfeed(self):
        with self.mutex:
            return self._backfeed
    def set_backfeed(self, value):
        with self.mutex:
            self._backfeed = value
    backfeed = property(get_backfeed, set_backfeed)
    def get_processed(self):
        with self.mutex:
            return self._processed
    def set_processed(self, value):
        with self.mutex:
            self._processed = value
    processed = property(get_processed, set_processed)
    def get_pend_seeds(self):
        with self.mutex:
            return self._pending_seeds
    def set_pend_seeds(self, value):
        with self.mutex:
            self._pending_seeds = value
    pending_seeds = property(get_pend_seeds, set_pend_seeds)
    def get_total_time(self):
        with self.mutex:
            return self._totaltime
    def set_total_time(self, value):
        with self.mutex:
            self._totaltime = value
    totaltime = property(get_total_time, set_total_time)
    def mark_start(self):
        """Record the session start timestamp."""
        with self.mutex:
            self.__starttime = time.time()
    def mark_end(self):
        """Store the elapsed session time (uses the locked property)."""
        self.totaltime = time.time() - self.__starttime
class FuzzResult:
    def __init__(self, nres):
        """Container for the outcome of one fuzzed request.

        :param nres: sequential result number within the session.
        """
        self.is_visible = True
        self.is_baseline = False
        self.nres = nres
        self.timer = 0
        self.rlevel = 1             # recursion depth this result belongs to
        self.exception = None       # set when the request failed
        self.description = ""
        self.url = ""
        self.code = 0               # HTTP status code (0 = none / error)
        self.chars = 0              # response length in characters
        self.lines = 0              # newline count of the response body
        self.words = 0              # whitespace-separated token count
        self.md5 = ""               # MD5 of the response body
        self.history = None         # the request object that produced this
        self.plugins_res = []
        self.plugins_backfeed = []
    @staticmethod
    def from_fuzzReq(req, nres = -1, exception = None):
        """Build a FuzzResult from a performed request.

        Computes the size / line / word counters and the MD5 digest of the
        response body; when *exception* is given the result is marked as
        failed (code 0, message appended to the description).
        """
        fr = FuzzResult(nres)
        fr.nres = nres
        if req.fr_content():
            m = hashlib.md5()
            m.update(req.fr_content())
            fr.md5 = m.hexdigest()
            fr.chars = len(req.fr_content())
            fr.lines = req.fr_content().count("\n")
            fr.words = len(re.findall("\S+",req.fr_content()))
        fr.code = 0 if req.fr_code() is None else int(req.fr_code())
        fr.url = req.fr_url()
        fr.description = req.wf_description
        fr.timer = req.fr_time()
        fr.rlevel = req.rlevel
        fr.history = req
        fr.is_baseline = req.wf_is_baseline
        if exception:
            fr.code = 0
            fr.exception = exception
            fr.description = fr.description + "! " + exception.msg
        return fr
    def is_path(self):
        """Heuristic: does this result point at a browsable directory?

        True for a 200 or 401 whose URL ends in '/', or a 3xx redirect
        whose Location header ends in '/'.
        """
        if self.code == 200 and self.url[-1] == '/':
            return True
        elif self.code >= 300 and self.code < 400:
            if "Location" in self.history.fr_headers()['response'] and self.history.fr_headers()['response']["Location"][-1]=='/':
                return True
        elif self.code == 401:
            if self.url[-1] == '/':
                return True
        return False
    def to_new_seed(self):
        """Turn this result into a new recursion seed one level deeper."""
        seed = FuzzRequest.from_fuzzRes(self, self._recursive_url())
        seed.rlevel += 1
        return seed
def _recursive_url(self):
if self.code >= 300 and self.code < 400 and "Location" in self.history.fr_headers()['response']:
new_url = self.history.fr_headers()['response']["Location"]
if not new_url[-1] == '/': new_url += "/"
# taking into consideration redirections to /xxx/ without full URL
new_url = urljoin(self.url, new_url)
elif self.code == 401 or self.code == 200:
new_url = self.url
if not self.url[-1] == '/': new_url = "/"
else:
raise Exception, "Error | |
<reponame>Data-Science-in-Mechanical-Engineering/joint_state_dynamics_estimation_HGOs_GPs
import logging
import os
import shutil
import sys
import GPy
import numpy as np
import pandas as pd
import seaborn as sb
from config import Config
from controllers import sin_controller_02D
from dynamics import dynamics_traj, duffing_dynamics, pendulum_dynamics, \
VanderPol_dynamics, duffing_dynamics_discrete, \
harmonic_oscillator_dynamics, duffing_modified_cossquare
from gain_adaptation_laws import simple_score_adapt_highgain, \
Praly_highgain_adaptation_law
from observers import duffing_observer_Delgado, \
dim1_observe_data, duffing_observer_Delgado_GP, \
duffing_observer_Delgado_discrete, duffing_observer_Delgado_GP_discrete, \
harmonic_oscillator_observer_GP, duffing_observer_Michelangelo_GP, \
WDC_justvelocity_discrete_observer_highgain_GP, \
WDC_justvelocity_observer_highgain_GP, \
WDC_justvelocity_observer_adaptive_highgain_GP
from plotting_functions import save_outside_data, plot_outside_data
from prior_means import duffing_continuous_prior_mean, \
duffing_discrete_prior_mean, duffing_continuous_to_discrete_prior_mean, \
duffing_continuous_prior_mean_Michelangelo_u, \
duffing_continuous_prior_mean_Michelangelo_deriv_u, \
pendulum_continuous_prior_mean_Michelangelo_u, \
pendulum_continuous_prior_mean_Michelangelo_deriv_u, \
harmonic_oscillator_continuous_prior_mean, \
harmonic_oscillator_continuous_to_discrete_prior_mean, \
harmonic_oscillator_continuous_prior_mean_Michelangelo_u, \
harmonic_oscillator_continuous_prior_mean_Michelangelo_deriv, \
harmonic_oscillator_continuous_prior_mean_Michelangelo_deriv_u, \
duffing_cossquare_continuous_prior_mean_Michelangelo_deriv, \
duffing_cossquare_continuous_prior_mean_Michelangelo_deriv_u, \
duffing_cossquare_continuous_prior_mean_Michelangelo_u, \
VanderPol_continuous_prior_mean_Michelangelo_u, \
VanderPol_continuous_prior_mean_Michelangelo_deriv, \
VanderPol_continuous_prior_mean_Michelangelo_deriv_u, \
wdc_arm_continuous_to_discrete_justvelocity_prior_mean, \
wdc_arm_continuous_justvelocity_prior_mean
from simple_GP_dyn import Simple_GP_Dyn
from simulation_functions import simulate_dynamics, simulate_estimations, \
form_GP_data
from utils import reshape_pt1, reshape_dim1, interpolate, reshape_dim1_tonormal
# Default seaborn style for every figure produced by this script.
sb.set_style('whitegrid')

# Script to test quasi-linear system with observer, adding GP to learn
# nonlinear part

# Logging: mirror every record both to a per-run log file (named after the
# first CLI argument) and to stdout.
# https://stackoverflow.com/questions/13733552/logger-configuration-to-log-to-file-and-print-to-stdout
logging.basicConfig(
    level=logging.INFO,
    format="[%(levelname)-5.5s] %(message)s",
    handlers=[
        logging.FileHandler("{0}/{1}.log".format(
            '../Figures/Logs', 'log' + str(sys.argv[1]))),
        logging.StreamHandler(sys.stdout)
    ])
def start_log():
    """(Re)enable logging to the per-run file and to stdout.

    Bug fix: the original body evaluated ``logging.INFO`` (a no-op
    expression) and created handler objects without ever attaching them
    to a logger, so calling it had no effect.  Attach the handlers to
    the root logger explicitly instead, guarding against duplicates so
    that calling it after the module-level ``basicConfig`` does not
    double every log line.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if not root.handlers:  # avoid duplicate output when already configured
        root.addHandler(logging.FileHandler("{0}/{1}.log".format(
            '../Figures/Logs', 'log' + str(sys.argv[1]))))
        root.addHandler(logging.StreamHandler(sys.stdout))
def stop_log():
    """Detach and close every root-logger handler, then shut logging down.

    Bug fix: the original cleared ``logging._handlers`` -- a private
    bookkeeping table of the logging module -- which does not detach any
    handler from the root logger, so output kept flowing.  Remove and
    close the handlers through the public API instead.
    """
    root = logging.getLogger()
    for handler in list(root.handlers):
        root.removeHandler(handler)
        handler.close()
    logging.shutdown()
def update_params_on_loop(system, dyn_kwargs):
    """Randomly refresh the dynamics parameters between simulation loops.

    Mutates and returns *dyn_kwargs*: a new ``gamma`` (and, for the
    pendulum, ``omega``) is drawn for the recognized systems; unknown
    systems are left untouched with a warning.
    """
    if 'Duffing' in system:
        # omega is deliberately kept fixed for the Duffing system.
        dyn_kwargs['gamma'] = np.random.uniform(0.2, 0.9, 1)
    elif 'Pendulum' in system:
        dyn_kwargs['omega'] = np.random.uniform(1, np.pi, 1)
        dyn_kwargs['gamma'] = np.random.uniform(1, 5, 1)
    else:
        logging.warning('No parameter update defined for this system')
    return dyn_kwargs
if __name__ == '__main__':
start_log()
# General params
true_meas_noise_var = 1e-5
process_noise_var = 0
system = 'Continuous/Duffing/Discrete_model/' \
'GP_justvelocity_adaptive_highgain_observer_noisy_inputs'
optim_method = 'RK45'
nb_samples = 500
t0_span = 0
tf_span = 30
t0 = 0
tf = 30
t_span = [t0_span, tf_span]
t_eval = np.linspace(t0, tf, nb_samples)
dt = (tf - t0) / nb_samples
nb_rollouts = 10 # Must be 0 if not simple dyns GP or def predict_euler
rollout_length = 300
rollout_controller = {'random': 3, 'sin_controller_02D': 4,
'null_controller': 3}
nb_loops = 10
sliding_window_size = 3000
verbose = False
monitor_experiment = True
multioutput_GP = False
sparse = None
memory_saving = False # Frees up RAM but slows down
restart_on_loop = False
if t0 != 0 or not restart_on_loop:
logging.warning(
'Initial simulation time is not 0 for each scenario! This is '
'incompatible with DynaROM.')
GP_optim_method = 'lbfgsb' # Default: 'lbfgsb'
meas_noise_var = 0.1 # Large to account for state estimation errors
hyperparam_optim = 'fixed_hyperparameters' # For hyperparameter optim
batch_adaptive_gain = None # For gain adaptation
assert not (batch_adaptive_gain and ('adaptive' in system)), \
'Cannot adapt the gain both through a continuous dynamic and a ' \
'batch adaptation law.'
observer_prior_mean = None
dyn_GP_prior_mean = None
dyn_GP_prior_mean_deriv = None
if 'Continuous_model' in system:
continuous_model = True
else:
continuous_model = False
# System params
if 'Continuous/Duffing' in system:
discrete = False
dyn_GP_prior_mean_deriv = None
dyn_kwargs = {'alpha': -1, 'beta': 1, 'delta': 0.3, 'gamma': 0.4,
'omega': 1.2, 'dt': dt, 'dt_before_subsampling': 0.001}
dynamics = duffing_dynamics
controller = sin_controller_02D
init_state = reshape_pt1(np.array([[0, 1]]))
init_state_estim = reshape_pt1(np.array([[0, 0]]))
init_control = reshape_pt1([0, 0]) # imposed instead u(t=0)!
observe_data = dim1_observe_data
if 'GP_Delgado' in system:
observer = duffing_observer_Delgado_GP
dyn_kwargs['prior_kwargs'] = {'alpha': -1, 'beta': 0.9,
'delta': 0.3, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
observer_prior_mean = duffing_continuous_prior_mean
dyn_GP_prior_mean = None
elif 'GP_Michelangelo' in system:
observer = duffing_observer_Michelangelo_GP
dyn_kwargs['prior_kwargs'] = {'alpha': 0, 'beta': 0,
'delta': 0, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
dyn_kwargs['prior_kwargs']['observer_gains'] = {'g': 8, 'k1': 5,
'k2': 5, 'k3': 1}
dyn_kwargs['saturation'] = np.array([-30, -1])
observer_prior_mean = None
dyn_GP_prior_mean = \
duffing_continuous_prior_mean_Michelangelo_u
dyn_GP_prior_mean_deriv = \
duffing_continuous_prior_mean_Michelangelo_deriv_u
init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
elif 'GP_justvelocity_highgain' in system:
observer = WDC_justvelocity_observer_highgain_GP
dyn_kwargs['prior_kwargs'] = {'alpha': -0.5, 'beta': 1.3,
'delta': 0.2, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
dyn_kwargs['prior_kwargs']['observer_gains'] = {'g': 8, 'k1': 5,
'k2': 5}
observer_prior_mean = None
if continuous_model:
dyn_GP_prior_mean = wdc_arm_continuous_justvelocity_prior_mean
dyn_kwargs['saturation'] = np.array([30])
else:
dyn_GP_prior_mean = \
wdc_arm_continuous_to_discrete_justvelocity_prior_mean
dyn_kwargs['saturation'] = np.array(
[-5 * dyn_kwargs.get('prior_kwargs').get('delta')])
dyn_GP_prior_mean_deriv = None
elif 'GP_justvelocity_adaptive_highgain' in system:
observer = WDC_justvelocity_observer_adaptive_highgain_GP
dyn_kwargs['prior_kwargs'] = {'alpha': -0.5, 'beta': 1.3,
'delta': 0.2, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
dyn_kwargs['prior_kwargs']['observer_gains'] = \
{'g': 15, 'k1': 5, 'k2': 5, 'p1': 300, 'p2': 1e-5,
'b': 1e-4, 'n': init_state.shape[1], 'adaptation_law':
Praly_highgain_adaptation_law}
dyn_kwargs['saturation'] = np.array(
[-5 * dyn_kwargs.get('prior_kwargs').get('delta')])
observer_prior_mean = None
if continuous_model:
dyn_GP_prior_mean = None
else:
dyn_GP_prior_mean = \
wdc_arm_continuous_to_discrete_justvelocity_prior_mean
dyn_GP_prior_mean_deriv = None
init_state_estim = reshape_pt1(np.array([[0, 0, dyn_kwargs[
'prior_kwargs']['observer_gains']['g']]]))
elif 'Delgado' in system:
observer = duffing_observer_Delgado
observer_prior_mean = None
dyn_GP_prior_mean = None
elif 'No_observer' in system:
observer = None
observer_prior_mean = None
dyn_GP_prior_mean = None
constrain_u = [-dyn_kwargs.get('gamma'),
dyn_kwargs.get('gamma')] # must be a python list!
constrain_x = [] # must be a python list!
grid_inf = -2
grid_sup = 2
# Create kernel
if dyn_kwargs.get('gamma') == 0:
input_dim = init_state.shape[1]
else:
input_dim = init_state.shape[1] + init_control.shape[1]
kernel = GPy.kern.RBF(input_dim=input_dim, variance=110,
lengthscale=np.array([5, 15, 150, 150]),
ARD=True)
kernel.unconstrain()
kernel.variance.set_prior(GPy.priors.Gaussian(110, 10))
kernel.lengthscale.set_prior(
GPy.priors.MultivariateGaussian(np.array([5, 15, 150, 150]),
np.diag([0.5, 1, 10, 10])))
elif 'Discrete/Duffing' in system:
discrete = True
dyn_kwargs = {'alpha': -1, 'beta': 1, 'delta': 0.3, 'gamma': 0.4,
'omega': 1.2}
dynamics = duffing_dynamics_discrete
controller = sin_controller_02D
if 'GP_Delgado' in system:
observer = duffing_observer_Delgado_GP_discrete
dyn_kwargs['prior_kwargs'] = {'alpha': -1, 'beta': 0.95,
'delta': 0.3, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
observer_prior_mean = duffing_discrete_prior_mean
dyn_GP_prior_mean = duffing_continuous_to_discrete_prior_mean
elif 'Delgado' in system:
observer = duffing_observer_Delgado_discrete
observer_prior_mean = None
dyn_GP_prior_mean = None
elif 'GP_justvelocity_highgain_discrete' in system:
observer = WDC_justvelocity_discrete_observer_highgain_GP
dyn_kwargs['prior_kwargs'] = {'alpha': -0.5, 'beta': 1.3,
'delta': 0.2, 'gamma': 0.4,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
dyn_kwargs['prior_kwargs']['observer_gains'] = {'g': 15, 'k1': 5,
'k2': 5}
dyn_kwargs['saturation'] = np.array(
[-5 * dyn_kwargs.get('prior_kwargs').get('delta')])
observer_prior_mean = None
dyn_GP_prior_mean = \
wdc_arm_continuous_to_discrete_justvelocity_prior_mean
dyn_GP_prior_mean_deriv = None
elif 'No_observer' in system:
observer = None
observer_prior_mean = None
dyn_GP_prior_mean = None
observe_data = dim1_observe_data
init_state = reshape_pt1(np.array([[0, 1]]))
init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
init_control = reshape_pt1([0, 0]) # imposed instead u(t=0)!
constrain_u = [-dyn_kwargs.get('gamma'),
dyn_kwargs.get('gamma')] # must be a python list!
constrain_x = [] # must be a python list!
grid_inf = -5
grid_sup = 5
# Create kernel
if dyn_kwargs.get('gamma') == 0:
input_dim = init_state.shape[1]
else:
input_dim = init_state.shape[1] + init_control.shape[1]
kernel = GPy.kern.RBF(input_dim=input_dim, variance=47,
lengthscale=np.array([1, 1, 1, 1]),
ARD=True)
kernel.unconstrain()
kernel.variance.set_prior(GPy.priors.Gaussian(30, 50))
kernel.lengthscale.set_prior(
GPy.priors.MultivariateGaussian(np.array([10, 10, 10, 10]),
np.diag([50, 50, 50, 50])))
elif 'Continuous/Pendulum' in system:
discrete = False
# dyn_kwargs = {'k': 0.05, 'm': 0.1, 'g': 9.8, 'l': 1, 'gamma': 5,
# 'f0': 0.5, 'f1': 1 / (2 * np.pi), 't1': tf * nb_loops}
# dynamics = pendulum_dynamics
# controller = chirp_controller
dyn_kwargs = {'k': 0.05, 'm': 0.1, 'g': 9.8, 'l': 1, 'gamma': 5.,
'omega': 1.2}
dynamics = pendulum_dynamics
controller = sin_controller_02D
if 'No_observer' in system:
observer = None
observer_prior_mean = None
dyn_GP_prior_mean = None
dyn_GP_prior_mean_deriv = None
elif 'GP_Michelangelo' in system:
observer = duffing_observer_Michelangelo_GP
dyn_kwargs['prior_kwargs'] = {'k': 0, 'm': 0.1, 'g': 0,
'l': 1, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
dyn_kwargs['prior_kwargs']['observer_gains'] = {'g': 20, 'k1': 5,
'k2': 5, 'k3': 1}
dyn_kwargs['saturation'] = np.array([-5, 5])
observer_prior_mean = None
dyn_GP_prior_mean = pendulum_continuous_prior_mean_Michelangelo_u
dyn_GP_prior_mean_deriv = \
pendulum_continuous_prior_mean_Michelangelo_deriv_u
observe_data = dim1_observe_data
init_state = reshape_pt1(np.array([[0, 0]]))
init_state_estim = reshape_pt1(np.array([[0, 0, 0]]))
init_control = reshape_pt1([0, 0]) # imposed instead u(t=0)!
constrain_u = [-dyn_kwargs.get('gamma'),
dyn_kwargs.get('gamma')] # must be a python list!
constrain_x = [] # must be a python list!
grid_inf = -3
grid_sup = 3
# Create kernel
if (dyn_kwargs.get('gamma') == 0) or (dyn_kwargs.get('gain') == 0):
input_dim = init_state.shape[1]
else:
input_dim = init_state.shape[1] + init_control.shape[1]
kernel = GPy.kern.RBF(input_dim=input_dim, variance=60,
lengthscale=np.array([12, 18, 150, 150]),
ARD=True)
kernel.unconstrain()
kernel.variance.set_prior(GPy.priors.Gaussian(60, 10))
kernel.lengthscale.set_prior(
GPy.priors.MultivariateGaussian(np.array([12, 18, 150, 150]),
np.diag([5, 5, 50, 50])))
meas_noise_var = 5e-3
elif 'Continuous/Harmonic_oscillator' in system:
discrete = False
dyn_kwargs = {'k': 0.05, 'm': 0.05, 'gamma': 0, 'omega': 1.2}
dynamics = harmonic_oscillator_dynamics
controller = sin_controller_02D
if 'GP_Luenberger_observer' in system:
observer = harmonic_oscillator_observer_GP
dyn_kwargs['prior_kwargs'] = {'k': 0.048, 'm': 0.05, 'gamma': 0,
'omega': 1.2, 'dt': dt,
'dt_before_subsampling': 0.001}
dyn_kwargs['continuous_model'] = continuous_model
observer_prior_mean = harmonic_oscillator_continuous_prior_mean
dyn_GP_prior_mean = \
harmonic_oscillator_continuous_to_discrete_prior_mean
elif 'GP_Michelangelo' in system:
observer = duffing_observer_Michelangelo_GP
dyn_kwargs['prior_kwargs'] = {'k': 0.05, 'm': 0.05, 'gamma': 0,
'omega': 1.2}
dyn_kwargs['continuous_model'] = continuous_model
observer_prior_mean | |
0x31, 0x00,
0x30, 0x00, 0x31, 0x00, 0x31, 0x00, 0x30, 0x00,
0x30, 0x00, 0x30, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x31, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x31, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x31, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x31, 0x00, 0x31, 0x00, 0x30, 0x00,
0x30, 0x00, 0x31, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x30, 0x00, 0x31, 0x00, 0x31, 0x00,
0x31, 0x00, 0x30, 0x00, 0x31, 0x00, 0x30, 0x00,
0x30, 0x00, 0x30, 0x00, 0x31, 0x00, 0x31, 0x00,
0x30, 0x00, 0x31, 0x00, 0x31, 0x00, 0x31, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x46, 0x00, 0x75, 0x00, 0x6c, 0x00,
0x6c, 0x00, 0x20, 0x00, 0x63, 0x00, 0x6f, 0x00,
0x70, 0x00, 0x69, 0x00, 0x65, 0x00, 0x73, 0x00,
0x3d, 0x00, 0x30, 0x00, 0x22, 0x00, 0x22, 0x00,
0x2c, 0x00, 0x22, 0x00, 0x22, 0x00, 0x44, 0x00,
0x69, 0x00, 0x66, 0x00, 0x66, 0x00, 0x65, 0x00,
0x72, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x74, 0x00,
0x61, 0x00, 0x6c, 0x00, 0x20, 0x00, 0x63, 0x00,
0x6f, 0x00, 0x70, 0x00, 0x69, 0x00, 0x65, 0x00,
0x73, 0x00, 0x3d, 0x00, 0x30, 0x00, 0x22, 0x00,
0x22, 0x00, 0x2c, 0x00, 0x22, 0x00, 0x22, 0x00,
0x4f, 0x00, 0x6e, 0x00, 0x65, 0x00, 0x20, 0x00,
0x66, 0x00, 0x75, 0x00, 0x6c, 0x00, 0x6c, 0x00,
0x20, 0x00, 0x65, 0x00, 0x76, 0x00, 0x65, 0x00,
0x72, 0x00, 0x79, 0x00, 0x3d, 0x00, 0x30, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x55, 0x00,
0x73, 0x00, 0x65, 0x00, 0x20, 0x00, 0x66, 0x00,
0x69, 0x00, 0x78, 0x00, 0x65, 0x00, 0x64, 0x00,
0x20, 0x00, 0x64, 0x00, 0x61, 0x00, 0x79, 0x00,
0x3d, 0x00, 0x66, 0x00, 0x61, 0x00, 0x6c, 0x00,
0x73, 0x00, 0x65, 0x00, 0x22, 0x00, 0x22, 0x00,
0x2c, 0x00, 0x22, 0x00, 0x22, 0x00, 0x46, 0x00,
0x69, 0x00, 0x78, 0x00, 0x65, 0x00, 0x64, 0x00,
0x20, 0x00, 0x64, 0x00, 0x61, 0x00, 0x79, 0x00,
0x3d, 0x00, 0x31, 0x00, 0x22, 0x00, 0x22, 0x00,
0x2c, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6d, 0x00,
0x70, 0x00, 0x72, 0x00, 0x65, 0x00, 0x73, 0x00,
0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00,
0x3d, 0x00, 0x30, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6d, 0x00,
0x70, 0x00, 0x72, 0x00, 0x65, 0x00, 0x73, 0x00,
0x73, 0x00, 0x20, 0x00, 0x69, 0x00, 0x6e, 0x00,
0x64, 0x00, 0x69, 0x00, 0x76, 0x00, 0x69, 0x00,
0x64, 0x00, 0x75, 0x00, 0x61, 0x00, 0x6c, 0x00,
0x6c, 0x00, 0x79, 0x00, 0x3d, 0x00, 0x66, 0x00,
0x61, 0x00, 0x6c, 0x00, 0x73, 0x00, 0x65, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x53, 0x00,
0x70, 0x00, 0x6c, 0x00, 0x69, 0x00, 0x74, 0x00,
0x3d, 0x00, 0x30, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x43, 0x00, 0x75, 0x00, 0x73, 0x00,
0x74, 0x00, 0x6f, 0x00, 0x6d, 0x00, 0x20, 0x00,
0x73, 0x00, 0x69, 0x00, 0x7a, 0x00, 0x65, 0x00,
0x3d, 0x00, 0x34, 0x00, 0x33, 0x00, 0x30, 0x00,
0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x30, 0x00,
0x30, 0x00, 0x30, 0x00, 0x30, 0x00, 0x22, 0x00,
0x22, 0x00, 0x2c, 0x00, 0x22, 0x00, 0x22, 0x00,
0x43, 0x00, 0x6f, 0x00, 0x6d, 0x00, 0x6d, 0x00,
0x65, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x3d, 0x00,
0x43, 0x00, 0x6f, 0x00, 0x62, 0x00, 0x69, 0x00,
0x61, 0x00, 0x6e, 0x00, 0x20, 0x00, 0x42, 0x00,
0x61, 0x00, 0x63, 0x00, 0x6b, 0x00, 0x75, 0x00,
0x70, 0x00, 0x20, 0x00, 0x31, 0x00, 0x31, 0x00,
0x20, 0x00, 0x47, 0x00, 0x72, 0x00, 0x61, 0x00,
0x76, 0x00, 0x69, 0x00, 0x74, 0x00, 0x79, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x45, 0x00,
0x6e, 0x00, 0x63, 0x00, 0x72, 0x00, 0x79, 0x00,
0x70, 0x00, 0x74, 0x00, 0x69, 0x00, 0x6f, 0x00,
0x6e, 0x00, 0x3d, 0x00, 0x30, 0x00, 0x2c, 0x00,
0x50, 0x00, 0x61, 0x00, 0x73, 0x00, 0x73, 0x00,
0x70, 0x00, 0x68, 0x00, 0x72, 0x00, 0x61, 0x00,
0x73, 0x00, 0x65, 0x00, 0x3d, 0x00, 0x59, 0x00,
0x67, 0x00, 0x42, 0x00, 0x6c, 0x00, 0x41, 0x00,
0x47, 0x00, 0x41, 0x00, 0x41, 0x00, 0x63, 0x00,
0x51, 0x00, 0x42, 0x00, 0x33, 0x00, 0x41, 0x00,
0x47, 0x00, 0x73, 0x00, 0x41, 0x00, 0x61, 0x00,
0x77, 0x00, 0x42, 0x00, 0x37, 0x00, 0x41, 0x00,
0x48, 0x00, 0x6b, 0x00, 0x41, 0x00, 0x61, 0x00,
0x51, 0x00, 0x41, 0x00, 0x35, 0x00, 0x41, 0x00,
0x48, 0x00, 0x77, 0x00, 0x41, 0x00, 0x42, 0x00,
0x77, 0x00, 0x41, 0x00, 0x44, 0x00, 0x41, 0x00,
0x41, 0x00, 0x30, 0x00, 0x41, 0x00, 0x63, 0x00,
0x77, 0x00, 0x42, 0x00, 0x78, 0x00, 0x41, 0x00,
0x48, 0x00, 0x55, 0x00, 0x41, 0x00, 0x65, 0x00,
0x77, 0x00, 0x41, 0x00, 0x64, 0x00, 0x41, 0x00,
0x42, 0x00, 0x41, 0x00, 0x41, 0x00, 0x63, 0x00,
0x67, 0x00, 0x42, 0x00, 0x2b, 0x00, 0x41, 0x00,
0x48, 0x00, 0x49, 0x00, 0x41, 0x00, 0x65, 0x00,
0x51, 0x00, 0x41, 0x00, 0x45, 0x00, 0x41, 0x00,
0x41, 0x00, 0x67, 0x00, 0x41, 0x00, 0x41, 0x00,
0x41, 0x00, 0x42, 0x00, 0x34, 0x00, 0x41, 0x00,
0x42, 0x00, 0x30, 0x00, 0x41, 0x00, 0x61, 0x00,
0x51, 0x00, 0x41, 0x00, 0x4a, 0x00, 0x41, 0x00,
0x48, 0x00, 0x4d, 0x00, 0x41, 0x00, 0x64, 0x00,
0x77, 0x00, 0x42, 0x00, 0x6f, 0x00, 0x41, 0x00,
0x43, 0x00, 0x77, 0x00, 0x41, 0x00, 0x43, 0x00,
0x51, 0x00, 0x41, 0x00, 0x42, 0x00, 0x41, 0x00,
0x48, 0x00, 0x55, 0x00, 0x41, 0x00, 0x44, 0x00,
0x77, 0x00, 0x42, 0x00, 0x2b, 0x00, 0x41, 0x00,
0x41, 0x00, 0x6b, 0x00, 0x41, 0x00, 0x63, 0x00,
0x41, 0x00, 0x42, 0x00, 0x37, 0x00, 0x41, 0x00,
0x47, 0x00, 0x4d, 0x00, 0x41, 0x00, 0x63, 0x00,
0x77, 0x00, 0x41, 0x00, 0x44, 0x00, 0x41, 0x00,
0x44, 0x00, 0x38, 0x00, 0x41, 0x00, 0x66, 0x00,
0x41, 0x00, 0x41, 0x00, 0x6d, 0x00, 0x41, 0x00,
0x41, 0x00, 0x3d, 0x00, 0x3d, 0x00, 0x2c, 0x00,
0x45, 0x00, 0x78, 0x00, 0x63, 0x00, 0x6c, 0x00,
0x75, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00,
0x6e, 0x00, 0x73, 0x00, 0x3d, 0x00, 0x2c, 0x00,
0x49, 0x00, 0x6e, 0x00, 0x63, 0x00, 0x6c, 0x00,
0x75, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00,
0x6e, 0x00, 0x73, 0x00, 0x3d, 0x00, 0x2c, 0x00,
0x22, 0x00, 0x22, 0x00, 0x50, 0x00, 0x72, 0x00,
0x65, 0x00, 0x20, 0x00, 0x62, 0x00, 0x61, 0x00,
0x63, 0x00, 0x6b, 0x00, 0x75, 0x00, 0x70, 0x00,
0x20, 0x00, 0x65, 0x00, 0x76, 0x00, 0x65, 0x00,
0x6e, 0x00, 0x74, 0x00, 0x73, 0x00, 0x3d, 0x00,
0x22, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, 0x00,
0x45, 0x00, 0x58, 0x00, 0x45, 0x00, 0x43, 0x00,
0x55, 0x00, 0x54, 0x00, 0x45, 0x00, 0x2c, 0x00,
0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00, 0x57, 0x00,
0x69, 0x00, 0x6e, 0x00, 0x64, 0x00, 0x6f, 0x00,
0x77, 0x00, 0x73, 0x00, 0x5c, 0x00, 0x53, 0x00,
0x79, 0x00, 0x73, 0x00, 0x74, 0x00, 0x65, 0x00,
0x6d, 0x00, 0x33, 0x00, 0x32, 0x00, 0x5c, 0x00,
0x63, 0x00, 0x6d, 0x00, 0x64, 0x00, 0x2e, 0x00,
0x65, 0x00, 0x78, 0x00, 0x65, 0x00, 0x2c, 0x00,
0x22, 0x00, 0x22, 0x00, 0x22, 0x00, 0x22, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x50, 0x00, 0x6f, 0x00, 0x73, 0x00,
0x74, 0x00, 0x20, 0x00, 0x62, 0x00, 0x61, 0x00,
0x63, 0x00, 0x6b, 0x00, 0x75, 0x00, 0x70, 0x00,
0x20, 0x00, 0x65, 0x00, 0x76, 0x00, 0x65, 0x00,
0x6e, 0x00, 0x74, 0x00, 0x73, 0x00, 0x3d, 0x00,
0x22, 0x00, 0x22, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x41, 0x00, 0x62, 0x00, 0x6f, 0x00,
0x72, 0x00, 0x74, 0x00, 0x20, 0x00, 0x69, 0x00,
0x66, 0x00, 0x20, 0x00, 0x70, 0x00, 0x72, 0x00,
0x65, 0x00, 0x2d, 0x00, 0x65, 0x00, 0x76, 0x00,
0x65, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x20, 0x00,
0x66, 0x00, 0x61, 0x00, 0x69, 0x00, 0x6c, 0x00,
0x73, 0x00, 0x3d, 0x00, 0x66, 0x00, 0x61, 0x00,
0x6c, 0x00, 0x73, 0x00, 0x65, 0x00, 0x22, 0x00,
0x22, 0x00, 0x2c, 0x00, 0x22, 0x00, 0x22, 0x00,
0x41, 0x00, 0x62, 0x00, 0x6f, 0x00, 0x72, 0x00,
0x74, 0x00, 0x20, 0x00, 0x69, 0x00, 0x66, 0x00,
0x20, 0x00, 0x70, 0x00, 0x6f, 0x00, 0x73, 0x00,
0x74, 0x00, 0x2d, 0x00, 0x65, 0x00, 0x76, 0x00,
0x65, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x20, 0x00,
0x66, 0x00, 0x61, 0x00, 0x69, 0x00, 0x6c, 0x00,
0x73, 0x00, 0x3d, 0x00, 0x66, 0x00, 0x61, 0x00,
0x6c, 0x00, 0x73, 0x00, 0x65, 0x00, 0x22, 0x00,
0x22, 0x00, 0x2c, 0x00, 0x4d, 0x00, 0x69, 0x00,
0x72, 0x00, 0x72, 0x00, 0x6f, 0x00, 0x72, 0x00,
0x3d, 0x00, 0x66, 0x00, 0x61, 0x00, 0x6c, 0x00,
0x73, 0x00, 0x65, 0x00, 0x2c, 0x00, 0x22, 0x00,
0x22, 0x00, 0x41, 0x00, 0x62, 0x00, 0x73, 0x00,
0x6f, 0x00, 0x6c, 0x00, 0x75, 0x00, 0x74, 0x00,
0x65, 0x00, 0x20, 0x00, | |
_sjcl.codec.base32.REMAINING;
var out = "", i, bits=0, c = _sjcl.codec.base32._chars, ta=0, bl = _sjcl.bitArray.bitLength(arr);
if (_hex) {
c = _sjcl.codec.base32._hexChars;
}
for (i=0; out.length * BASE < bl; ) {
out += c.charAt((ta ^ arr[i]>>>bits) >>> REMAINING);
if (bits < BASE) {
ta = arr[i] << (BASE-bits);
bits += REMAINING;
i++;
} else {
ta <<= BASE;
bits -= BASE;
}
}
while ((out.length & 7) && !_noEquals) { out += "="; }
return out;
},
/** Convert from a base32 string to a bitArray */
toBits: function(str, _hex) {
  // Strip whitespace and '=' padding; base32 is case-insensitive, so normalise to upper.
  str = str.replace(/\s|=/g,'').toUpperCase();
  var BITS = _sjcl.codec.base32.BITS, BASE = _sjcl.codec.base32.BASE, REMAINING = _sjcl.codec.base32.REMAINING;
  // out: accumulated 32-bit words; ta: partial word being filled; bits: bits pending in ta.
  var out = [], i, bits=0, c = _sjcl.codec.base32._chars, ta=0, x, format="base32";
  if (_hex) {
    // base32hex variant uses a different alphabet (RFC 4648 section 7).
    c = _sjcl.codec.base32._hexChars;
    format = "base32hex";
  }
  for (i=0; i<str.length; i++) {
    // Map the character to its 5-bit value.
    x = c.indexOf(str.charAt(i));
    if (x < 0) {
      // Invalid character, try hex format
      if (!_hex) {
        try {
          return _sjcl.codec.base32hex.toBits(str);
        }
        catch (e) {}
      }
      throw new _sjcl.exception.invalid("this isn't " + format + "!");
    }
    // Pack consecutive 5-bit groups into big-endian 32-bit words.
    if (bits > REMAINING) {
      // The new group overflows the current word: emit it and start the next.
      bits -= REMAINING;
      out.push(ta ^ x>>>bits);
      ta = x << (BITS-bits);
    } else {
      bits += BASE;
      ta ^= x << (BITS-bits);
    }
  }
  // Flush any leftover partial word (bits&56 keeps a whole number of input groups).
  if (bits&56) {
    out.push(_sjcl.bitArray.partial(bits&56, ta, 1));
  }
  return out;
}
};
_sjcl.codec.base32hex = {
  // Thin wrappers over the base32 codec with the hex-alphabet flag set.
  fromBits: function (arr, _noEquals) {
    return _sjcl.codec.base32.fromBits(arr, _noEquals, 1);
  },
  toBits: function (str) {
    return _sjcl.codec.base32.toBits(str, 1);
  }
};
/** @fileOverview Bit array codec implementations.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
/**
* Base64 encoding/decoding
* @namespace
*/
_sjcl.codec.base64 = {
  /** The base64 alphabet.
   * @private
   */
  _chars: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
  /** Convert from a bitArray to a base64 string. */
  fromBits: function (arr, _noEquals, _url) {
    // ta: leftover high bits carried between words; bits: how many are carried.
    var out = "", i, bits=0, c = _sjcl.codec.base64._chars, ta=0, bl = _sjcl.bitArray.bitLength(arr);
    if (_url) {
      // URL-safe variant swaps '+/' for '-_'.
      c = c.substr(0,62) + '-_';
    }
    // Each output character encodes 6 bits; stop once bl bits are emitted.
    for (i=0; out.length * 6 < bl; ) {
      // Top 6 bits of the carry xor'd with the next word's high bits.
      out += c.charAt((ta ^ arr[i]>>>bits) >>> 26);
      if (bits < 6) {
        // Not enough carried bits for another character: pull in the next word.
        ta = arr[i] << (6-bits);
        bits += 26;
        i++;
      } else {
        ta <<= 6;
        bits -= 6;
      }
    }
    // Pad to a multiple of 4 characters unless suppressed.
    while ((out.length & 3) && !_noEquals) { out += "="; }
    return out;
  },
  /** Convert from a base64 string to a bitArray */
  toBits: function(str, _url) {
    // Strip whitespace and '=' padding.
    str = str.replace(/\s|=/g,'');
    var out = [], i, bits=0, c = _sjcl.codec.base64._chars, ta=0, x;
    if (_url) {
      c = c.substr(0,62) + '-_';
    }
    for (i=0; i<str.length; i++) {
      // Map character to its 6-bit value.
      x = c.indexOf(str.charAt(i));
      if (x < 0) {
        throw new _sjcl.exception.invalid("this isn't base64!");
      }
      // Pack 6-bit groups into big-endian 32-bit words.
      if (bits > 26) {
        // Current word full: emit it, start the next with the spillover.
        bits -= 26;
        out.push(ta ^ x>>>bits);
        ta = x << (32-bits);
      } else {
        bits += 6;
        ta ^= x << (32-bits);
      }
    }
    // Flush leftover bits as a partial word (whole input groups only).
    if (bits&56) {
      out.push(_sjcl.bitArray.partial(bits&56, ta, 1));
    }
    return out;
  }
};
_sjcl.codec.base64url = {
  // URL-safe base64: '-_' alphabet, no '=' padding.
  fromBits: function (arr) {
    return _sjcl.codec.base64.fromBits(arr, 1, 1);
  },
  toBits: function (str) {
    return _sjcl.codec.base64.toBits(str, 1);
  }
};
/** @fileOverview Javascript SHA-256 implementation.
*
* An older version of this implementation is available in the public
* domain, but this one is (c) <NAME>, <NAME>, <NAME>,
* Stanford University 2008-2010 and BSD-licensed for liability
* reasons.
*
* Special thanks to <NAME> for pointing out several bugs in
* this code.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
/**
* Context for a SHA-256 operation in progress.
* @constructor
*/
_sjcl.hash.sha256 = function (hash) {
  // Build the IV/round-constant tables lazily, on first construction.
  if (!this._key[0]) { this._precompute(); }
  if (!hash) {
    this.reset();
  } else {
    // Copy constructor: clone another context's chaining state, buffer and bit count.
    this._h = hash._h.slice(0);
    this._buffer = hash._buffer.slice(0);
    this._length = hash._length;
  }
};
/**
* Hash a string or an array of words.
* @static
* @param {bitArray|String} data the data to hash.
* @return {bitArray} The hash value, an array of 16 big-endian words.
*/
_sjcl.hash.sha256.hash = function (data) {
  // One-shot convenience wrapper: fresh context, absorb, finalize.
  var ctx = new _sjcl.hash.sha256();
  return ctx.update(data).finalize();
};
_sjcl.hash.sha256.prototype = {
/**
* The hash's block size, in bits.
* @constant
*/
blockSize: 512,
/**
* Reset the hash state.
* @return this
*/
reset:function () {
  // Restore the 8-word chaining state to the SHA-256 IV and drop buffered input.
  this._h = this._init.slice(0);
  this._buffer = [];
  this._length = 0;   // total bits hashed so far
  return this;
},
/**
* Input several words to the hash.
* @param {bitArray|String} data the data to hash.
* @return this
*/
update: function (data) {
  // Strings are UTF-8 encoded into a bitArray first.
  if (typeof data === "string") {
    data = _sjcl.codec.utf8String.toBits(data);
  }
  var i, b = this._buffer = _sjcl.bitArray.concat(this._buffer, data),
      ol = this._length,
      nl = this._length = ol + _sjcl.bitArray.bitLength(data);
  // 2^53 - 1: beyond this the bit count loses precision in a JS double.
  if (nl > 9007199254740991){
    throw new _sjcl.exception.invalid("Cannot hash more than 2^53 - 1 bits");
  }
  if (typeof Uint32Array !== 'undefined') {
    // Fast path: view the buffer as a typed array and compress whole
    // 512-bit (16-word) blocks without per-block splicing.
    var c = new Uint32Array(b);
    var j = 0;
    // i walks the absolute bit count; start at the first full-block boundary past ol.
    for (i = 512+ol - ((512+ol) & 511); i <= nl; i+= 512) {
      this._block(c.subarray(16 * j, 16 * (j+1)));
      j += 1;
    }
    // Drop the consumed words; any partial block stays buffered.
    b.splice(0, 16 * j);
  } else {
    for (i = 512+ol - ((512+ol) & 511); i <= nl; i+= 512) {
      this._block(b.splice(0,16));
    }
  }
  return this;
},
/**
* Complete hashing and output the hash value.
* @return {bitArray} The hash value, an array of 8 big-endian words.
*/
finalize:function () {
  // h keeps a reference to the chaining state so reset() below doesn't lose the digest.
  var i, b = this._buffer, h = this._h;
  // Round out and push the buffer: append the mandatory padding '1' bit.
  b = _sjcl.bitArray.concat(b, [_sjcl.bitArray.partial(1,1)]);
  // Round out the buffer to a multiple of 16 words, less the 2 length words.
  for (i = b.length + 2; i & 15; i++) {
    b.push(0);
  }
  // append the length (big-endian 64-bit bit count, high word first)
  b.push(Math.floor(this._length / 0x100000000));
  b.push(this._length | 0);
  // Compress all remaining complete blocks.
  while (b.length) {
    this._block(b.splice(0,16));
  }
  // Reinitialise so the context can be reused; h still points at the final state.
  this.reset();
  return h;
},
/**
* The SHA-256 initialization vector, to be precomputed.
* @private
*/
_init:[],
/*
_init:[0x6a09e667,0xbb67ae85,0x3c6ef372,0xa54ff53a,0x510e527f,0x9b05688c,0x1f83d9ab,0x5be0cd19],
*/
/**
* The SHA-256 hash key, to be precomputed.
* @private
*/
_key:[],
/*
_key:
[0x428a2f98, 0x71374491, <KEY>, 0xe9b5dba5, <KEY>, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2],
*/
/**
* Function to precompute _init and _key.
* @private
*/
_precompute: function () {
var i = 0, prime = 2, factor, isPrime;
function frac(x) { return (x-Math.floor(x)) * 0x100000000 | 0; }
for (; i<64; prime++) {
isPrime = true;
for (factor=2; factor*factor <= prime; factor++) {
if (prime % factor === 0) {
isPrime = false;
break;
}
}
if (isPrime) {
if (i<8) {
this._init[i] = frac(Math.pow(prime, 1/2));
}
this._key[i] = frac(Math.pow(prime, 1/3));
i++;
}
}
},
/**
* Perform one cycle of SHA-256.
* @param {Uint32Array|bitArray} w one block of words.
* @private
*/
_block:function (w) {
var i, tmp, a, b,
h = this._h,
k = this._key,
h0 = h[0], h1 = h[1], h2 = h[2], h3 = h[3],
h4 = h[4], h5 = h[5], h6 = h[6], h7 = h[7];
/* Rationale for placement of |0 :
* If a value can overflow is original 32 bits by a factor of more than a few
* million (2^23 ish), there is a possibility that it might overflow the
* 53-bit mantissa and lose precision.
*
* To avoid this, we clamp back to 32 bits by |'ing with 0 on any value that
* propagates around the loop, and on the hash state h[]. I don't believe
* that the clamps on h4 and on h0 are strictly necessary, but it's close
* (for h4 anyway), and better safe than sorry.
*
* The clamps on h[] are necessary for the output to be correct even in the
* common case and for short inputs.
*/
for (i=0; i<64; i++) {
// load up the input word for this round
if (i<16) {
tmp = w[i];
} else {
a = w[(i+1 ) & 15];
b = w[(i+14) & 15];
tmp = w[i&15] = ((a>>>7 ^ a>>>18 ^ a>>>3 ^ a<<25 ^ | |
from . import ClientCaches
from . import ClientConstants as CC
from . import ClientGUIFunctions
from . import ClientGUIMenus
from . import ClientGUIShortcuts
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusExceptions
from . import HydrusGlobals as HG
import os
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from . import QtPorting as QP
from . import QtPorting as QP
# Offset (px) used when cascading child windows from a parent corner and as an
# edge margin when clamping windows to the display.
CHILD_POSITION_PADDING = 24
# Leniency margin (px) for off-screen tests; some window managers place windows
# slightly off screen to hide their borders.
FUZZY_PADDING = 10
def GetDisplayPosition( window ):
    """Return the top-left QPoint of the available desktop area for window's display."""
    
    available_geometry = QW.QApplication.desktop().availableGeometry( window )
    
    return available_geometry.topLeft()
def GetDisplaySize( window ):
    """Return the QSize of the available desktop area for window's display."""
    
    available_geometry = QW.QApplication.desktop().availableGeometry( window )
    
    return available_geometry.size()
def GetSafePosition( position ):
    """Return position if it lies on a known display, else ( -1, -1 ).
    
    position is an ( x, y ) tuple. Some window managers size the windows just
    off screen to cut off borders, so the test point is nudged inwards by
    FUZZY_PADDING before asking Qt which screen contains it.
    """
    
    ( p_x, p_y ) = position
    
    # choose a test position that's a little more lenient than the raw corner
    ( test_x, test_y ) = ( p_x + FUZZY_PADDING, p_y + FUZZY_PADDING )
    
    screen = QW.QApplication.screenAt( QC.QPoint( test_x, test_y ) )
    
    # previously this looked up the screen's index in QApplication.screens()
    # only to test the result against None--the screen object itself already
    # answers 'is this point on any display?', so the O(n) lookup is dropped
    if screen is None:
        
        return ( -1, -1 )
        
    
    return position
def GetSafeSize( tlw, min_size, gravity ):
    """Choose a ( width, height ) for tlw that respects parent and display limits.
    
    min_size: ( min_width, min_height ) tuple, typically from sizeHint.
    gravity: ( width_gravity, height_gravity ); -1 means 'use the minimum',
    otherwise presumably a 0-1 fraction of the parent's available space -- TODO confirm.
    """
    
    ( min_width, min_height ) = min_size
    
    # size of the window manager's decoration: full frame minus client area
    frame_padding = tlw.frameGeometry().size() - tlw.size()
    
    parent = tlw.parentWidget()
    
    if parent is None:
        
        width = min_width
        height = min_height
        
    else:
        
        parent_window = parent.window()
        
        # when we initialise, we might not have a frame yet because we haven't done show() yet
        # so borrow main gui's
        if frame_padding.isEmpty():
            
            main_gui = HG.client_controller.gui
            
            if main_gui is not None and QP.isValid( main_gui ) and not main_gui.isFullScreen():
                
                frame_padding = main_gui.frameGeometry().size() - main_gui.size()
                
            
        
        if parent_window.isFullScreen():
            
            # fullscreen windows have no frame, so their size is the whole area
            parent_available_size = parent_window.size()
            
        else:
            
            parent_frame_size = parent_window.frameGeometry().size()
            
            parent_available_size = parent_frame_size - frame_padding
            
        
        parent_available_width = parent_available_size.width()
        parent_available_height = parent_available_size.height()
        
        ( width_gravity, height_gravity ) = gravity
        
        if width_gravity == -1:
            
            width = min_width
            
        else:
            
            # scale to a fraction of the parent, leaving a padded border
            max_width = parent_available_width - ( 2 * CHILD_POSITION_PADDING )
            
            width = int( width_gravity * max_width )
            
        
        if height_gravity == -1:
            
            height = min_height
            
        else:
            
            max_height = parent_available_height - ( 2 * CHILD_POSITION_PADDING )
            
            height = int( height_gravity * max_height )
            
        
    
    # finally clamp to the display so the window cannot come up larger than the screen
    display_size = GetDisplaySize( tlw )
    
    display_available_size = display_size - frame_padding
    
    width = min( display_available_size.width() - 2 * CHILD_POSITION_PADDING, width )
    height = min( display_available_size.height() - 2 * CHILD_POSITION_PADDING, height )
    
    return ( width, height )
def ExpandTLWIfPossible( tlw, frame_key, desired_size_delta ):
    """Grow tlw by desired_size_delta = ( dx, dy ) if the screen allows it.
    
    Only ever expands (never shrinks), and does nothing when the window is
    maximised or fullscreen. Afterwards, nudges the window back on screen.
    """
    
    new_options = HG.client_controller.new_options
    
    ( remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) = new_options.GetFrameLocation( frame_key )
    
    if not tlw.isMaximized() and not tlw.isFullScreen():
        
        ( current_width, current_height ) = tlw.size().toTuple()
        
        ( desired_delta_width, desired_delta_height ) = desired_size_delta
        
        desired_width = current_width
        
        if desired_delta_width > 0:
            
            # FUZZY_PADDING gives a little slack so the content surely fits
            desired_width = current_width + desired_delta_width + FUZZY_PADDING
            
        
        desired_height = current_height
        
        if desired_delta_height > 0:
            
            desired_height = current_height + desired_delta_height + FUZZY_PADDING
            
        
        # clamp the desired size to parent/display limits
        ( width, height ) = GetSafeSize( tlw, ( desired_width, desired_height ), default_gravity )
        
        # expand only--never contract the user's window
        if width > current_width or height > current_height:
            
            size = QC.QSize( width, height )
            
            tlw.resize( size )
            
        
        #tlw.setMinimumSize( tlw.sizeHint() )
        
        SlideOffScreenTLWUpAndLeft( tlw )
def MouseIsOnMyDisplay( window ):
    """Return True if the mouse cursor is currently on the same screen as window."""
    
    handle = window.window().windowHandle()
    
    if handle is None:
        
        return False
        
    
    cursor_screen = QW.QApplication.screenAt( QG.QCursor.pos() )
    
    # identity comparison--Qt hands out the same QScreen object per display
    return cursor_screen is handle.screen()
def SaveTLWSizeAndPosition( tlw, frame_key ):
    """Persist tlw's current geometry and maximised/fullscreen state under frame_key."""
    
    if tlw.isMinimized():
        
        return
        
    
    new_options = HG.client_controller.new_options
    
    ( remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) = new_options.GetFrameLocation( frame_key )
    
    maximised = tlw.isMaximized()
    fullscreen = tlw.isFullScreen()
    
    if not maximised and not fullscreen:
        
        safe_position = GetSafePosition( ( tlw.x(), tlw.y() ) )
        
        # only overwrite remembered geometry when the window is actually on a display
        if safe_position != ( -1, -1 ):
            
            last_size = tlw.size().toTuple()
            last_position = safe_position
            
        
    
    new_options.SetFrameLocation( frame_key, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen )
def SetInitialTLWSizeAndPosition( tlw, frame_key ):
    """Size and place a freshly created top level window from its saved frame_key options.
    
    Applies remembered size/position when available and safe, otherwise falls
    back to the default gravity and position, then restores maximised/fullscreen.
    """
    
    new_options = HG.client_controller.new_options
    
    ( remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) = new_options.GetFrameLocation( frame_key )
    
    parent = tlw.parentWidget()
    
    if remember_size and last_size is not None:
        
        ( width, height ) = last_size
        
        tlw.resize( QC.QSize( width, height ) )
        
    else:
        
        ( min_width, min_height ) = tlw.sizeHint().toTuple()
        
        ( width, height ) = GetSafeSize( tlw, ( min_width, min_height ), default_gravity )
        
        tlw.resize( QC.QSize( width, height ) )
        
    
    min_width = min( 240, width )
    min_height = min( 240, height )
    
    tlw.setMinimumSize( QP.TupleToQSize( ( min_width, min_height ) ) )
    
    #
    
    if remember_position and last_position is not None:
        
        safe_position = GetSafePosition( last_position )
        
        # GetSafePosition returns a tuple, so compare against the tuple sentinel.
        # the old 'safe_position != QC.QPoint( -1, -1 )' could never be False,
        # so unsafe ( -1, -1 ) positions were applied to the window
        if safe_position != ( -1, -1 ):
            
            tlw.move( QP.TupleToQPoint( safe_position ) )
            
        
    elif default_position == 'topleft':
        
        if parent is not None:
            
            if isinstance( parent, QW.QWidget ):
                
                parent_tlw = parent.window()
                
            else:
                
                parent_tlw = parent
                
            
            # cascade from the parent's corner
            ( parent_x, parent_y ) = parent_tlw.pos().toTuple()
            
            tlw.move( QP.TupleToQPoint( ( parent_x + CHILD_POSITION_PADDING, parent_y + CHILD_POSITION_PADDING ) ) )
            
        else:
            
            safe_position = GetSafePosition( ( 0 + CHILD_POSITION_PADDING, 0 + CHILD_POSITION_PADDING ) )
            
            # same tuple-sentinel fix as above
            if safe_position != ( -1, -1 ):
                
                tlw.move( QP.TupleToQPoint( safe_position ) )
                
            
        
        SlideOffScreenTLWUpAndLeft( tlw )
        
    elif default_position == 'center':
        
        if parent is not None:
            
            QP.CenterOnWindow( parent, tlw )
            
        
    
    # Comment from before the Qt port: if these aren't callafter, the size and pos calls don't stick if a restore event happens
    
    if maximised:
        
        tlw.showMaximized()
        
    
    if fullscreen and not HC.PLATFORM_MACOS:
        
        tlw.showFullScreen()
def SlideOffScreenTLWUpAndLeft( tlw ):
    """Nudge tlw up and/or left just enough that its bottom-right corner is on screen."""
    
    frame_rect = tlw.frameGeometry()
    
    top_left = frame_rect.topLeft()
    bottom_right = frame_rect.bottomRight()
    
    display_size = GetDisplaySize( tlw )
    display_pos = GetDisplayPosition( tlw )
    
    # usable display edges, leaving a padded margin
    furthest_right = display_pos.x() + display_size.width() - CHILD_POSITION_PADDING
    furthest_bottom = display_pos.y() + display_size.height() - CHILD_POSITION_PADDING
    
    overhangs_right = bottom_right.x() > furthest_right
    overhangs_bottom = bottom_right.y() > furthest_bottom
    
    if overhangs_right or overhangs_bottom:
        
        # only ever move up/left, never push the window down/right
        delta = QC.QPoint( min( furthest_right - bottom_right.x(), 0 ), min( furthest_bottom - bottom_right.y(), 0 ) )
        
        tlw.move( top_left + delta )
class NewDialog( QP.Dialog ):
def __init__( self, parent, title, do_not_activate = False ):
    """Basic hydrus dialog.
    
    parent: parent widget; title: window title text;
    do_not_activate: if True, show the dialog without stealing keyboard focus.
    """
    
    QP.Dialog.__init__( self, parent )
    
    if do_not_activate:
        
        # show without taking focus from the currently active window
        self.setAttribute( QC.Qt.WA_ShowWithoutActivating )
        
    
    self.setWindowTitle( title )
    
    # timestamp throttle for move-event pubs--see moveEvent
    self._last_move_pub = 0.0
    
    self._new_options = HG.client_controller.new_options
    
    self.setWindowIcon( QG.QIcon( HG.client_controller.frame_icon_pixmap ) )
    
    # user interaction--push the idle maintenance timer back
    HG.client_controller.ResetIdleTimer()
    
    self._widget_event_filter = QP.WidgetEventFilter( self )
def moveEvent( self, event ):
    
    # throttle the pub to at most ~10/s while the user drags the window about
    due_for_pub = HydrusData.TimeHasPassedFloat( self._last_move_pub + 0.1 )
    
    if due_for_pub:
        
        HG.client_controller.pub( 'top_level_window_move_event' )
        
        self._last_move_pub = HydrusData.GetNowPrecise()
        
    
    # let Qt continue normal move handling
    event.ignore()
def _CanCancel( self ):
    """Hook for subclasses: return False to veto a cancel attempt."""
    
    return True
def _CanOK( self ):
    """Hook for subclasses: return False to veto an OK attempt."""
    
    return True
def _ReadyToClose( self, value ):
    """Hook for subclasses: return False to block closing with this dialog result value."""
    
    return True
def _SaveOKPosition( self ):
    """Hook for subclasses: persist window geometry on a successful OK."""
    
    pass
def _TryEndModal( self, value ):
if not self.isModal(): # in some rare cases (including spammy AutoHotkey, looks like), this can be fired before the dialog can clean itself up
return False
if not self._ReadyToClose( value ):
return False
if value == QW.QDialog.Rejected:
if not self._CanCancel():
return False
self.SetCancelled( True )
if value == QW.QDialog.Accepted:
if not self._CanOK():
return False
self._SaveOKPosition()
self.CleanBeforeDestroy()
try:
self.done( value )
except Exception as e:
HydrusData.ShowText( 'This dialog seems to have been unable to close for some reason. I am printing the stack to the log. The dialog may have already closed, or may attempt to close now. Please inform hydrus dev of this situation. I recommend you restart the client if you can. If the UI is locked, you will have to kill it | |
<reponame>oliverbritton/drg-pom
# neuron_biomarkers.py
# calculation of AP biomarkers from neuronal voltage traces
import sys
import numpy as np
import pandas as pd
from scipy import optimize
from matplotlib import pyplot as plt
from . import davidson_biomarkers as db
from .. import simulation_helpers as sh
from .. import analysis as an
# Biomarkers to manage and analyse neuronal simulation data and potentially experimental
# data too
RHEO_FAIL = np.nan # Code to return if rheobase calculation fails.
# np.nan == np.nan returns False so use is not instead. pom code
# relies on this value being nan to interface with pandas correctly.
def calculate_biomarkers(traces, model):
    """Calculate every biomarker and output to dict.

    TODO: Use the rheobase to work out what simulation to run to calculate
    biomarkers off of (at rheobase).

    traces -- dict of per-AP traces (as produced by split_trace_into_aps)
    model -- model handed to the rheobase search
    """
    # The original had three stacked bare string statements here; only the
    # first one counted as the docstring, so they are merged above.
    biomarkers = calculate_simple_biomarkers(traces, model)
    biomarkers['RMP'] = np.mean(calculate_rmp(traces))
    # Rheobase needs its own stimulus-amplitude scan, so it is done separately.
    biomarkers['Rheobase'] = calculate_rheobase(model, amp_step=0.1, amp_max=5, make_plot=False,)
    return biomarkers
def average_biomarker_values(biomarkers, how_to_handle_nans='return'):
    """Average biomarker values over multiple APs.

    how_to_handle_nans:
      'return' -- propagate nans: any nan input makes the result nan, which
                  flags up weird models clearly during calibration.
      'remove' -- drop nans and average the rest (all-nan input gives nan).
                  Riskier: a mixed nan/non-nan case still yields a number,
                  which can hide a problem in one or more APs.
    """
    if how_to_handle_nans == 'remove':
        values = np.array(biomarkers)
        finite = values[~np.isnan(values)]
        if finite.size == 0:
            return np.nan
        biomarkers = finite
    elif how_to_handle_nans != 'return':
        raise ValueError("{} is not an accepted nan handling method.".format(how_to_handle_nans))
    return np.mean(biomarkers)
def calculate_simple_biomarkers(traces, model="Not needed", how_to_handle_nans='return'):
    """ Calculate every biomarker that can be calculated from a normal simulation trace and output to dict - rheobase and RMP need to be calculated separately."""
    biomarkers = {}
    # biomarker_names = ['APFullWidth', 'APPeak', 'APRiseTime', 'APSlopeMin', 'APSlopeMax',. 'AHPAmp', 'AHPTau', 'ISI', 'RMP', 'Rheobase']
    def error_handle(filename, traces): # Error handler for finding out why biomarkers are throwing errors.
        # Dumps the offending traces to a pickle and plots them for later inspection.
        import pickle
        print(sys.exc_info())
        print(traces['numAPs'])
        plt.figure()
        for t,v in zip(traces['t'],traces['v']):
            plt.plot(t,v)
        with open(filename, 'wb') as handle:
            pickle.dump(traces, handle)
        print("Error, traces dumped to {}.".format(filename))
    # NOTE(review): the bare 'except:' clauses below deliberately trap any failure
    # and divert it to error_handle, so one bad trace does not kill a whole
    # population run -- the biomarker is then simply missing from the dict.
    try:
        biomarkers['APFullWidth'] = average_biomarker_values(calculate_ap_full_width(traces,threshold=5.,method='gradient'), how_to_handle_nans)
    except:
        error_handle('fullwidth.pickle',traces)
    try:
        biomarkers['APHalfWidth'] = average_biomarker_values(calculate_ap_half_width(traces,threshold=5.,method='gradient'), how_to_handle_nans)
    except:
        error_handle('halfwidth.pickle',traces)
    biomarkers['APPeak'] = average_biomarker_values(calculate_ap_peak(traces),how_to_handle_nans)
    try:
        biomarkers['APRiseTime'] = average_biomarker_values(calculate_ap_rise_time(traces,dvdtthreshold=5),how_to_handle_nans)
    except:
        error_handle('risetime.pickle',traces)
    ap_slope_mins, ap_slope_maxs = calculate_ap_slope_min_max(traces)
    biomarkers['APSlopeMin'] = average_biomarker_values(ap_slope_mins, how_to_handle_nans)
    biomarkers['APSlopeMax'] = average_biomarker_values(ap_slope_maxs, how_to_handle_nans)
    biomarkers['Threshold'] = average_biomarker_values(calculate_threshold(traces), how_to_handle_nans)
    # AHP fit returns amplitude, time constant and trough voltage.
    amp, tau, trough = fit_afterhyperpolarization(traces=traces,dvdt_threshold=5, ahp_model='single_exp', full_output=False)
    """
    try:
        amp, tau = fit_afterhyperpolarization(traces=traces,dvdt_threshold=5, ahp_model='single_exp', full_output=False)
    except:
        error_handle('fitahp.pickle',traces)
        amp=0
        tau=0
    """
    biomarkers['AHPAmp'] = amp
    biomarkers['AHPTau'] = tau
    biomarkers['AHPTrough'] = trough
    biomarkers['ISI'] = inter_spike_interval(traces)
    biomarkers['numAPs'] = traces['numAPs']
    return biomarkers
def compute_model_biomarkers(model=None, mechanisms=None, make_plot=False, sim_kwargs=None, xlims=None):
    """Find all standard biomarkers of a model or mechanism set.

    model -- pre-built model; if None, one is built from mechanisms
    mechanisms -- mechanism set used when model is None
    make_plot -- plot the rheobase-amplitude simulation trace
    sim_kwargs -- optional simulation kwargs; 'model' and 'amp' are overwritten
    xlims -- optional (xmin, xmax) limits for the plot

    Returns a dict of biomarkers. When no rheobase number is found, only
    'Rheobase' and 'RMP' are populated -- the missing entries become nans when
    the dict is added to a master dataframe, which is the desired behaviour.
    """
    biomarkers = {}
    if model is None:  # was 'model == None'; identity test is the correct idiom
        model = sh.build_model(mechanisms)
    # Else use the supplied model.
    if sim_kwargs:
        sim_kwargs['model'] = model
    else:
        sim_kwargs = sh.get_default_simulation_kwargs(model=model)
    rheobase = calculate_rheobase(model, amp_step=0.1, amp_max=5, make_plot=False, sim_kwargs=sim_kwargs)
    # Only compute spike-shape biomarkers when the rheobase search produced a
    # number. Replaces '(isinstance(...) == False) & (...)' with the direct test.
    # NOTE(review): a failed search returns RHEO_FAIL = np.nan, which is still a
    # float, so this check passes on nan too -- behaviour preserved; confirm
    # whether that is intended.
    find_other_biomarkers = isinstance(rheobase, (float, int))
    if find_other_biomarkers:
        if sim_kwargs:
            sim_kwargs['amp'] = rheobase
            sim_kwargs['model'] = model
        else:
            sim_kwargs = sh.get_default_simulation_kwargs(amp=rheobase, model=model)
        sim_kwargs['make_plot'] = make_plot
        output = sh.simulation(**sim_kwargs)
        t = output['t']; v = output['v']
        t = t[::2]; v = v[::2]  # downsample to 20 kHz
        traces = split_trace_into_aps(t, v)
        biomarkers = calculate_simple_biomarkers(traces, model, how_to_handle_nans='remove')
    # RMP comes from a separate, stimulus-free simulation.
    rmp_kwargs = {'amp': 0.0, 'dur': 3000., 'delay': 0., 'interval': 0., 'num_stims': 1, 't_stop': 3000.}
    for kwarg in sim_kwargs:
        # Write in sim_kwargs where they are not already present in rmp_kwargs
        # so that non-RMP specific kwargs are consistent between simulations
        if kwarg not in rmp_kwargs:
            rmp_kwargs[kwarg] = sim_kwargs[kwarg]
    output = sh.simulation(**rmp_kwargs)
    rmp_t = output['t']; rmp_v = output['v']
    rmp_t = rmp_t[::2]; rmp_v = rmp_v[::2]  # 20 kHz
    rmp_traces = split_trace_into_aps(rmp_t, rmp_v)
    rmp = np.mean(calculate_rmp(rmp_traces))
    # was 'make_plot & (xlims != None)': bitwise '&' on booleans and '!=' on
    # None replaced with the boolean/identity idioms
    if make_plot and xlims is not None:
        plt.xlim(xlims[0], xlims[1])
    # If we calculated other biomarkers, add extras calculated in separate
    # simulations; if not, these are added to the empty dict, leaving nans for
    # the rest when merged into the master dataframe.
    biomarkers['Rheobase'] = rheobase
    biomarkers['RMP'] = rmp
    return biomarkers
" --- Calculation and trace manipulation functions -- "
def split_trace_into_aps(t,v,threshold=0,time_threshold=5, check_voltage_gradient=True):#
"""
Threshold is at 0 mV which can let RF to cause spurious AP detection unless
we perform a voltage gradient check, which defaults to True.
-- Old ideas to solve the spurious AP detection problem with threshold at 0 mV --
One idea is to do split trace and then calculate AP width using a voltage threshold of something like -25 mV.
Then, if AP width is really long (> 100 ms?), redo the calculation with a lower threshold (0 mV?).
If mean AP width is then < 100 ms, we use the new split. We could write a log file to say that this happened,
with the trace in it.
However, that is complex and may break if something comes up I haven't thought of.
Instead we could reset the default threshold to 0 mV but add in a gradient check on the voltage crossing from below.
Currently a gradient threshold of 1 mV/ms seems like it should be effective although I don't have any examples of slow
calcium initated APs to test against.
"""
# Units for defaults
# t, time_threshold - ms
# v, threshold - mV
assert len(t) == len(v), "v and t length mismatch"
crossings = []
time_crossings = np.array([])
# Looks for crossings from below
for i,voltage in enumerate(v[:-1]):
if (voltage < threshold) & (v[i+1] >= threshold):
# Check local voltage gradient if neeeded, if gradient is too small ignore the crossing
# Time window set to 1.0 to try to counteract bug with averaging too much of the pre-upstroke.
if (check_voltage_gradient) & (is_voltage_gradient_too_small(i, t, v, dvdt_threshold=1.0, time_window=1.0)):
continue # Don't add the crossing if the local voltage gradient is small and we're checking for that
crossings.append(i)
time_crossings = np.append(time_crossings,t[i])
# For each crossing, remove all instances within the time threshold, leaving only the first crossing of the threshold
grouped_crossings = np.zeros(np.size(crossings),float)
for i in range(len(crossings)-1):
if grouped_crossings[i] == 0:
nearby_crossings = np.array( (time_crossings[i+1:] - time_crossings[i]) < time_threshold )
# Assign
grouped_crossings[i+1:] += nearby_crossings
assert all(grouped_crossings < 2), "Grouped crossing grouped more than once"
firstCrossIndices = np.where(grouped_crossings == 0)
# Need to turn crossings into a numpy array to index it with np.where
firstCrossings = np.array(crossings)[firstCrossIndices]
numAPs = len(firstCrossings)
assert numAPs >= 0, "Negative number of APs!"
# Assign time and voltage to traces
times = []
voltages = []
# If 1 or 0 APs, return 1 trace, otherwise...
# if (numAPs == 0) | (numAPs == 1):
# times.append(t)
# voltages.append(v)
"""
There are some commented assumptions about where traces begin and end here. The core idea is that all data points in the trace have to be assigned to 1 and only 1 AP. If areas of quiescence are a problem for particular analysis methods, they will be stripped out by other specialised functions.
Our goal in this function is to divide up the trace without leaving any of it out, so that we have everything for any future analysis.
"""
# If we have multiple APs, for each AP find the minimum value
# of Vm before the next AP
if numAPs > 0:
startIdx = np.zeros(numAPs,int)
endIdx = np.zeros(numAPs,int)
for AP in range(numAPs):
if AP == 0:
startIdx[0] = 0 # Start of first AP is beginning of trace
else:
startIdx[AP] = endIdx[AP-1]+1 # Start of all other APs is after last AP
if AP == numAPs-1:
endIdx[AP] = len(v)-1 # End of last AP is end of trace
else:
# Calculate end of this trace - end is minimum voltage of this trace
# From threshold | |
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_probs
class BertSelfOutput(nn.Module):
    """Output projection for text self-attention: dense -> dropout -> residual LayerNorm."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # residual connection followed by layer norm
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Text attention block: BertSelfAttention followed by its output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended, probs = self.self(input_tensor, attention_mask)
        # output projection consumes both the attended states and the residual input
        return self.output(attended, input_tensor), probs
class BertIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size plus activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        # an activation given by name (str, or unicode on py2) is looked up in ACT2FN;
        # otherwise it is assumed to already be a callable
        act_is_name = isinstance(act, str) or (
            sys.version_info[0] == 2 and isinstance(act, unicode)
        )
        self.intermediate_act_fn = ACT2FN[act] if act_is_name else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Feed-forward contraction: intermediate_size -> hidden_size with residual LayerNorm."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder layer: attention then feed-forward, each with residuals."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attended, probs = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        return self.output(expanded, attended), probs
class BertImageSelfAttention(nn.Module):
    """Multi-head self-attention over the image (visual) feature stream."""

    def __init__(self, config):
        super(BertImageSelfAttention, self).__init__()
        if config.v_hidden_size % config.v_num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.v_hidden_size, config.v_num_attention_heads)
            )
        self.num_attention_heads = config.v_num_attention_heads
        self.attention_head_size = int(config.v_hidden_size / config.v_num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # per-head projections, fused into single linear layers
        self.query = nn.Linear(config.v_hidden_size, self.all_head_size)
        self.key = nn.Linear(config.v_hidden_size, self.all_head_size)
        self.value = nn.Linear(config.v_hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.v_attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores with the (precomputed, additive) mask applied,
        # then softmax over the key dimension.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = nn.Softmax(dim=-1)(scores)
        # This drops entire tokens to attend to, as in the original Transformer paper.
        probs = self.dropout(probs)
        context = torch.matmul(probs, v).permute(0, 2, 1, 3).contiguous()
        # merge heads back: (batch, seq, all_head_size)
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        return context.view(*merged_shape), probs
class BertImageSelfOutput(nn.Module):
    """Output projection for image self-attention: dense -> dropout -> residual LayerNorm."""

    def __init__(self, config):
        super(BertImageSelfOutput, self).__init__()
        self.dense = nn.Linear(config.v_hidden_size, config.v_hidden_size)
        self.LayerNorm = BertLayerNorm(config.v_hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.v_hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertImageAttention(nn.Module):
    """Self-attention block for the image stream: attention + output projection."""

    def __init__(self, config):
        super(BertImageAttention, self).__init__()
        self.self = BertImageSelfAttention(config)
        self.output = BertImageSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        context, probs = self.self(input_tensor, attention_mask)
        return self.output(context, input_tensor), probs
class BertImageIntermediate(nn.Module):
    """Position-wise feed-forward expansion for the image stream."""

    def __init__(self, config):
        super(BertImageIntermediate, self).__init__()
        self.dense = nn.Linear(config.v_hidden_size, config.v_intermediate_size)
        # The activation may be given by name (looked up in ACT2FN) or as a
        # callable.  The `unicode` clause is only evaluated under Python 2.
        if isinstance(config.v_hidden_act, str) or (
            sys.version_info[0] == 2 and isinstance(config.v_hidden_act, unicode)
        ):
            self.intermediate_act_fn = ACT2FN[config.v_hidden_act]
        else:
            self.intermediate_act_fn = config.v_hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertImageOutput(nn.Module):
    """Projects the feed-forward expansion back down to v_hidden_size with
    dropout and a residual LayerNorm connection."""

    def __init__(self, config):
        super(BertImageOutput, self).__init__()
        self.dense = nn.Linear(config.v_intermediate_size, config.v_hidden_size)
        self.LayerNorm = BertLayerNorm(config.v_hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.v_hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
class BertImageLayer(nn.Module):
    """One full transformer layer for the image stream."""

    def __init__(self, config):
        super(BertImageLayer, self).__init__()
        self.attention = BertImageAttention(config)
        self.intermediate = BertImageIntermediate(config)
        self.output = BertImageOutput(config)

    def forward(self, hidden_states, attention_mask):
        attn_out, probs = self.attention(hidden_states, attention_mask)
        layer_out = self.output(self.intermediate(attn_out), attn_out)
        return layer_out, probs
class BertBiAttention(nn.Module):
    """Bi-directional co-attention between the vision stream (suffix 1)
    and the text stream (suffix 2).

    Each stream's queries attend over the *other* stream's keys/values:
    context_layer1 mixes vision values under text queries (aligned with
    text positions), while context_layer2 mixes text values under vision
    queries (aligned with image positions).  See forward().
    """

    def __init__(self, config):
        super(BertBiAttention, self).__init__()
        # The shared bi-attention width must split evenly across heads.
        if config.bi_hidden_size % config.bi_num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.bi_hidden_size, config.bi_num_attention_heads)
            )

        self.num_attention_heads = config.bi_num_attention_heads
        self.attention_head_size = int(
            config.bi_hidden_size / config.bi_num_attention_heads
        )
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # self.scale = nn.Linear(1, self.num_attention_heads, bias=False)
        # self.scale_act_fn = ACT2FN['relu']

        # Q/K/V projections for the vision stream (input_tensor1).
        self.query1 = nn.Linear(config.v_hidden_size, self.all_head_size)
        self.key1 = nn.Linear(config.v_hidden_size, self.all_head_size)
        self.value1 = nn.Linear(config.v_hidden_size, self.all_head_size)
        # self.logit1 = nn.Linear(config.hidden_size, self.num_attention_heads)

        self.dropout1 = nn.Dropout(config.v_attention_probs_dropout_prob)

        # Q/K/V projections for the text stream (input_tensor2).
        self.query2 = nn.Linear(config.hidden_size, self.all_head_size)
        self.key2 = nn.Linear(config.hidden_size, self.all_head_size)
        self.value2 = nn.Linear(config.hidden_size, self.all_head_size)
        # self.logit2 = nn.Linear(config.hidden_size, self.num_attention_heads)

        self.dropout2 = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head) into (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, input_tensor1, attention_mask1, input_tensor2, attention_mask2, co_attention_mask=None, use_co_attention_mask=False):
        """Cross-attend the two streams.

        Returns (context_layer1, context_layer2, (attention_probs1,
        attention_probs2)).  context_layer1 is built from query2 (text
        queries over image keys/values); context_layer2 from query1
        (vision queries over text keys/values).
        """
        # for vision input.
        mixed_query_layer1 = self.query1(input_tensor1)
        mixed_key_layer1 = self.key1(input_tensor1)
        mixed_value_layer1 = self.value1(input_tensor1)
        # mixed_logit_layer1 = self.logit1(input_tensor1)

        query_layer1 = self.transpose_for_scores(mixed_query_layer1)
        key_layer1 = self.transpose_for_scores(mixed_key_layer1)
        value_layer1 = self.transpose_for_scores(mixed_value_layer1)
        # logit_layer1 = self.transpose_for_logits(mixed_logit_layer1)

        # for text input:
        mixed_query_layer2 = self.query2(input_tensor2)
        mixed_key_layer2 = self.key2(input_tensor2)
        mixed_value_layer2 = self.value2(input_tensor2)
        # mixed_logit_layer2 = self.logit2(input_tensor2)

        query_layer2 = self.transpose_for_scores(mixed_query_layer2)
        key_layer2 = self.transpose_for_scores(mixed_key_layer2)
        value_layer2 = self.transpose_for_scores(mixed_value_layer2)
        # logit_layer2 = self.transpose_for_logits(mixed_logit_layer2)

        # Take the dot product between "query2" and "key1" to get the raw attention scores for value 1.
        attention_scores1 = torch.matmul(query_layer2, key_layer1.transpose(-1, -2))
        attention_scores1 = attention_scores1 / math.sqrt(self.attention_head_size)
        attention_scores1 = attention_scores1 + attention_mask1
        if use_co_attention_mask:
            # The co-attention mask is oriented for the query1->key2 scores,
            # so it is transposed for this direction.
            attention_scores1 = attention_scores1 + co_attention_mask.permute(0,1,3,2)

        # Normalize the attention scores to probabilities.
        attention_probs1 = nn.Softmax(dim=-1)(attention_scores1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs1 = self.dropout1(attention_probs1)

        context_layer1 = torch.matmul(attention_probs1, value_layer1)
        context_layer1 = context_layer1.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape1 = context_layer1.size()[:-2] + (self.all_head_size,)
        context_layer1 = context_layer1.view(*new_context_layer_shape1)

        # Take the dot product between "query1" and "key2" to get the raw attention scores for value 2.
        attention_scores2 = torch.matmul(query_layer1, key_layer2.transpose(-1, -2))
        attention_scores2 = attention_scores2 / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)

        # we can comment this line for single flow.
        attention_scores2 = attention_scores2 + attention_mask2
        if use_co_attention_mask:
            attention_scores2 = attention_scores2 + co_attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs2 = nn.Softmax(dim=-1)(attention_scores2)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs2 = self.dropout2(attention_probs2)

        context_layer2 = torch.matmul(attention_probs2, value_layer2)
        context_layer2 = context_layer2.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape2 = context_layer2.size()[:-2] + (self.all_head_size,)
        context_layer2 = context_layer2.view(*new_context_layer_shape2)

        return context_layer1, context_layer2, (attention_probs1, attention_probs2)
class BertBiOutput(nn.Module):
    """Output projections mapping each bi-attention context back into its
    stream's hidden size, with dropout and a residual LayerNorm connection.

    NOTE(review): q_dense1/q_dropout1/q_dense2/q_dropout2 are created but
    never used in forward(); presumably kept for checkpoint compatibility —
    confirm before removing.
    """

    def __init__(self, config):
        super(BertBiOutput, self).__init__()

        # Vision-side projection: bi_hidden_size -> v_hidden_size.
        self.dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
        self.LayerNorm1 = BertLayerNorm(config.v_hidden_size, eps=1e-12)
        self.dropout1 = nn.Dropout(config.v_hidden_dropout_prob)

        self.q_dense1 = nn.Linear(config.bi_hidden_size, config.v_hidden_size)
        self.q_dropout1 = nn.Dropout(config.v_hidden_dropout_prob)

        # Text-side projection: bi_hidden_size -> hidden_size.
        self.dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
        self.LayerNorm2 = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout2 = nn.Dropout(config.hidden_dropout_prob)

        self.q_dense2 = nn.Linear(config.bi_hidden_size, config.hidden_size)
        self.q_dropout2 = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states1, input_tensor1, hidden_states2, input_tensor2):
        """Project each context and add it residually to its stream input."""
        context_state1 = self.dense1(hidden_states1)
        context_state1 = self.dropout1(context_state1)

        context_state2 = self.dense2(hidden_states2)
        context_state2 = self.dropout2(context_state2)

        hidden_states1 = self.LayerNorm1(context_state1 + input_tensor1)
        hidden_states2 = self.LayerNorm2(context_state2 + input_tensor2)

        return hidden_states1, hidden_states2
class BertConnectionLayer(nn.Module):
    """Co-attention transformer layer connecting the two streams:
    bi-attention, output projection, then one feed-forward block per stream."""

    def __init__(self, config):
        super(BertConnectionLayer, self).__init__()
        self.biattention = BertBiAttention(config)

        self.biOutput = BertBiOutput(config)

        self.v_intermediate = BertImageIntermediate(config)
        self.v_output = BertImageOutput(config)

        self.t_intermediate = BertIntermediate(config)
        self.t_output = BertOutput(config)

    def forward(self, input_tensor1, attention_mask1, input_tensor2, attention_mask2, co_attention_mask=None, use_co_attention_mask=False):
        """Run one co-attention layer; returns (vision_out, text_out, probs)."""
        bi_output1, bi_output2, co_attention_probs = self.biattention(
            input_tensor1, attention_mask1, input_tensor2, attention_mask2, co_attention_mask, use_co_attention_mask
        )

        # bi_output2 is aligned with stream 1's positions (it was built from
        # stream-1 queries) and bi_output1 with stream 2's, hence the
        # swapped argument order here.
        attention_output1, attention_output2 = self.biOutput(bi_output2, input_tensor1, bi_output1, input_tensor2)

        intermediate_output1 = self.v_intermediate(attention_output1)
        layer_output1 = self.v_output(intermediate_output1, attention_output1)

        intermediate_output2 = self.t_intermediate(attention_output2)
        layer_output2 = self.t_output(intermediate_output2, attention_output2)

        return layer_output1, layer_output2, co_attention_probs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
# in the bert encoder, we need to extract three things here.
# text bert layer: BertLayer
# vision bert layer: BertImageLayer
# Bi-Attention: Given the output of two bertlayer, perform bi-directional
# attention and add on two layers.
self.FAST_MODE = config.fast_mode
self.with_coattention = config.with_coattention
self.v_biattention_id = config.v_biattention_id
self.t_biattention_id = config.t_biattention_id
self.in_batch_pairs = config.in_batch_pairs
self.fixed_t_layer = config.fixed_t_layer
self.fixed_v_layer = config.fixed_v_layer
layer = BertLayer(config)
v_layer = BertImageLayer(config)
connect_layer = BertConnectionLayer(config)
self.layer = nn.ModuleList(
[copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]
)
self.v_layer = nn.ModuleList(
[copy.deepcopy(v_layer) for _ in range(config.v_num_hidden_layers)]
)
self.c_layer = nn.ModuleList(
[copy.deepcopy(connect_layer) for _ in range(len(config.v_biattention_id))]
)
def forward(
self,
txt_embedding,
image_embedding,
txt_attention_mask,
image_attention_mask,
co_attention_mask=None,
output_all_encoded_layers=True,
output_all_attention_masks=False,
):
v_start = 0
t_start = 0
count = 0
all_encoder_layers_t = []
all_encoder_layers_v = []
all_attention_mask_t = []
all_attnetion_mask_v = []
all_attention_mask_c = []
batch_size, num_words, t_hidden_size = txt_embedding.size()
_, num_regions, v_hidden_size = image_embedding.size()
use_co_attention_mask = False
for v_layer_id, t_layer_id in zip(self.v_biattention_id, self.t_biattention_id):
v_end = v_layer_id
t_end = t_layer_id
assert self.fixed_t_layer | |
<filename>archive/scripts/make_features.py
import numpy as np
import math
from scipy import stats
from scipy import signal
import sys
from scipy.signal import butter, lfilter, filtfilt
from matplotlib import pyplot as plt
import os.path
data_path = '../data/cleaned_data/'  # input: cleaned per-subject data files
save_path = '../data/features/'  # output: per-subject feature CSV files
DT_SCORES = 30  # seconds between sleep-score samples (one epoch)
DT_MOTION = 15  # seconds; sample spacing used for the motion feature window
DT_HR = 2  # seconds; sample spacing for interpolated heart rate
DT_CLOCK = 15  # seconds
WINDOW_SIZE = 11  # number of epochs to consider centered around the time point, must be odd
WINDOW_SIZE_HR = 11  # same as WINDOW_SIZE, but for the heart-rate window
WANT_CIRC = True  # Boolean for if we should compute circadian feature
SECONDS_PER_HOUR = 3600.0
HOURS_PER_DAY = 24
HR_SMOOTHING_WINDOW_SIZE = 250  # samples in the Gaussian smoothing window for HR
def smooth(y, box_pts):  # TODO: Use or remove.
    """Boxcar-smooth *y* with a window of ``box_pts`` samples.

    The input is padded with copies of its boundary samples so the 'valid'
    convolution returns an array the same length as the input (assumes an
    odd ``box_pts``).

    Args:
        y (np.array): Signal to smooth
        box_pts (int): Window length in samples
    Returns:
        np.array : smoothed signal, same length as ``y``
    """
    box = np.ones(box_pts) / box_pts
    # Fix: integer division -- box_pts / 2 is a float on Python 3 and
    # cannot be used as a slice bound.
    half = box_pts // 2
    if half:
        # Fix: the original passed the pad *values* where np.insert expects
        # *indices* (arguments swapped), corrupting the padding.
        y = np.insert(y, 0, y[0:half])
        y = np.append(y, y[-half:])
    y_smooth = np.convolve(y, box, mode='valid')
    return y_smooth
def smooth_gauss(y, box_pts):
    """Gaussian-smooth *y* with a window of ``box_pts`` samples.

    The normalized Gaussian kernel is centered on the window with
    sigma = box_pts.  The input is padded with copies of its boundary
    samples so the 'valid' convolution returns an array the same length
    as the input (assumes an odd ``box_pts``).

    Args:
        y (np.array): Signal to smooth
        box_pts (int): Window length in samples
    Returns:
        np.array : smoothed signal, same length as ``y``
    """
    mu = box_pts / 2.0
    sigma = box_pts / 1.0
    # Vectorized kernel; same values as the original per-index loop.
    indices = np.arange(box_pts)
    box = np.exp(-0.5 * (((indices - mu) / sigma) ** 2))
    box = box / np.sum(box)
    # Fix: integer division (Python 3) and np.insert argument order -- the
    # original passed the pad values where np.insert expects indices.
    half = box_pts // 2
    if half:
        y = np.insert(y, 0, y[0:half])
        y = np.append(y, y[-half:])
    y_smooth = np.convolve(y, box, mode='valid')
    return y_smooth
def make_features(subject_id):
    """
    Compute features and save to file; save validation images to outputs/ folder

    Reads <data_path><id>_scores.out, _hr.out, _counts.out (and optionally
    _clock_proxy.out), builds per-epoch feature rows, and writes one CSV per
    feature family into <save_path>.

    Args:
        subject_id (int or str): Subject ID used in input/output file names
    """
    subject_id = str(subject_id)  # Fix: int IDs crashed the path concatenations below.

    is_started = False
    score_output = np.array([])
    motion_output = np.array([])
    hr_output = np.array([])
    clock_output = np.array([])
    time_output = np.array([])  # Fix: was never initialized with its siblings.
    circ_model_output = np.array([])

    print('Making features for Subject ' + subject_id)
    file_name = data_path + subject_id

    print('-- Reading data...')
    scores = np.genfromtxt(file_name + '_scores.out', delimiter=',')
    hr = np.genfromtxt(file_name + '_hr.out', delimiter=',')
    motion = np.genfromtxt(file_name + '_counts.out', delimiter=',')

    # The circadian model file is optional even when WANT_CIRC is set.
    # (Fix: circadian_file_exists is now always defined.)
    circadian_file_exists = False
    if WANT_CIRC and os.path.isfile(file_name + '_clock_proxy.out'):
        circ_model = np.genfromtxt(file_name + '_clock_proxy.out', delimiter=',')
        circadian_file_exists = True

    start_time = np.amin(scores[:, 0])
    end_time = min([motion[-1, 0], hr[-1, 0], scores[-1, 0]])  # End time is the minimum of the last valid value.
    duration = int(math.floor(end_time - start_time))
    print('Duration: ' + str((end_time - start_time) / SECONDS_PER_HOUR) + ' hrs')

    hr = process_hr(hr, start_time, end_time)
    # motion = process_motion(motion) ## TODO: Use or remove

    last_scored_epoch = 0
    print('- Looping over epochs...')

    # These are for plotting the inputs for debugging
    plot_timestamps = []
    plot_activity_counts = []
    plot_scores = []
    plot_heart_rate = []
    plot_circadian_model = []
    plot_cosine_clock = []

    invalid_motion_count = 0
    invalid_hr_count = 0
    total_epoch_count = 0

    # Loop over all 30 s epochs.  Fix: DT_SCORES // 2 -- range() rejects the
    # float produced by DT_SCORES / 2 on Python 3.
    for i in range(DT_SCORES // 2, duration, DT_SCORES):
        total_epoch_count = total_epoch_count + 1

        begin_epoch_time = int(start_time + i - DT_SCORES / 2)
        end_epoch_time = int(start_time + i + DT_SCORES / 2)
        scores_in_range_condition = (np.array(scores[:, 0]) < end_epoch_time) & (np.array(scores[:, 0]) >= begin_epoch_time)
        scores_in_range = np.extract(scores_in_range_condition, scores[:, 1])

        if len(scores_in_range) > 0:
            epoch = stats.mode(scores_in_range)
            epoch = epoch[0]
            last_scored_epoch = epoch
        else:
            epoch = last_scored_epoch  # No score in this window; carry forward.

        if (np.mean(scores_in_range) - epoch) != 0 and epoch != 0:  # Catch misalignment
            print('ERROR: Scores in range non-constant. Is something offset by < 30s?')
            print(scores_in_range)
            print('Scored epoch: ' + str(epoch))

        # np.mean([]) is NaN, so windows with no scores also fail this test.
        if np.mean(scores_in_range) >= 0:  # If negative, epoch was not scored
            # Set the sample window, centered at i, over which the heart rate and motion will be collected
            sample_begin = int(start_time + i - WINDOW_SIZE * DT_SCORES / 2)
            sample_end = int(start_time + i + WINDOW_SIZE * DT_SCORES / 2)
            sample_begin_hr = int(start_time + i - WINDOW_SIZE_HR * DT_SCORES / 2)
            sample_end_hr = int(start_time + i + WINDOW_SIZE_HR * DT_SCORES / 2)

            # Grab all features in range
            motion_epoch = get_motion_feature(range(sample_begin, sample_end, DT_MOTION), motion)
            hr_epoch = get_hr_feature(range(sample_begin_hr, sample_end_hr, DT_HR), hr)
            clock_epoch = get_clock_feature(i)
            time_epoch = get_time_feature(i)
            if WANT_CIRC and circadian_file_exists:
                circ_model_epoch = get_circ_model_feature(start_time + i, circ_model)

            # The feature getters signal "no valid data" with [-1].
            if motion_epoch[0] != -1:
                motion_valid = True
            else:
                motion_valid = False
                invalid_motion_count = invalid_motion_count + 1
                print('Invalid motion in epoch: ' + str(invalid_motion_count))

            if hr_epoch[0] != -1:
                hr_valid = True
            else:
                hr_valid = False
                invalid_hr_count = invalid_hr_count + 1
                print('Invalid heart rate in epoch: ' + str(invalid_hr_count))

            # Only append if motion data was valid in range
            if motion_valid and hr_valid:
                if is_started is False:
                    # First valid epoch seeds the 2-D stacks below.
                    is_started = True
                    score_output = np.append(score_output, epoch)
                    motion_output = motion_epoch
                    hr_output = hr_epoch
                    clock_output = clock_epoch
                    time_output = time_epoch
                    if WANT_CIRC and circadian_file_exists:
                        circ_model_output = circ_model_epoch
                else:
                    score_output = np.append(score_output, epoch)
                    motion_output = np.vstack([motion_output, motion_epoch])
                    hr_output = np.vstack([hr_output, hr_epoch])
                    clock_output = np.vstack([clock_output, clock_epoch])
                    time_output = np.vstack([time_output, time_epoch])
                    if WANT_CIRC and circadian_file_exists:
                        circ_model_output = np.vstack([circ_model_output, circ_model_epoch])

                plot_timestamps.append(len(plot_timestamps) + 1)
                if epoch > 0:
                    plot_scores.append(1)
                else:
                    plot_scores.append(0)
                motion_epoch = np.array(motion_epoch)
                plot_activity_counts.append(np.mean(motion_epoch / 50))
                plot_heart_rate.append(hr_epoch[0])
                if WANT_CIRC and circadian_file_exists:
                    plot_circadian_model.append(np.mean(circ_model_epoch))
                plot_cosine_clock.append(np.mean(clock_epoch))

    # Plot for debugging purposes
    plot_heart_rate = np.array(plot_heart_rate)
    plot_heart_rate = 5.0 * plot_heart_rate / np.amax(plot_heart_rate)
    plt.plot(plot_timestamps, plot_activity_counts)
    plt.step(plot_timestamps, plot_scores)
    plt.plot(plot_timestamps, plot_heart_rate)
    if WANT_CIRC and circadian_file_exists:
        plt.plot(plot_timestamps, plot_circadian_model)
    plt.plot(plot_timestamps, plot_cosine_clock)
    plt.ylim(-1, 8)
    plt.savefig('../outputs/feature_validation_' + subject_id + '.png')
    plt.close()

    print('- Saving features...')
    # Save features.
    np.savetxt(save_path + subject_id + '_score_feat.csv', score_output, delimiter=",", fmt='%d')
    np.savetxt(save_path + subject_id + '_motion_feat.csv', motion_output, delimiter=",", fmt='%f')
    np.savetxt(save_path + subject_id + '_hr_feat.csv', hr_output, delimiter=",", fmt='%f')
    np.savetxt(save_path + subject_id + '_clock_feat.csv', clock_output, delimiter=",", fmt='%f')
    np.savetxt(save_path + subject_id + '_time_feat.csv', time_output, delimiter=",", fmt='%f')
    if WANT_CIRC and circadian_file_exists:
        np.savetxt(save_path + subject_id + '_circ_model_feat.csv', circ_model_output, delimiter=",", fmt='%f')
def process_motion(motion):
    """Placeholder for motion preprocessing; currently returns the input unchanged."""
    # TODO: Use or remove
    return motion
def process_hr(hr, start, end):
    """
    Process heart rate: interpolate onto a uniform DT_HR grid, convert from
    bpm to inter-beat interval (seconds), scale to unit standard deviation,
    and Gaussian-smooth.

    NOTE: the input array ``hr`` is modified in place (column 1 is
    overwritten) before the new array is built.

    Args:
        hr (np.array): Heart rate data; column 0 timestamps, column 1 bpm
        start (int): Timestamp in seconds of start time
        end (int): Timestamp in seconds of end time
    Returns:
        np.array : Interpolated heart rate, converted from bpm to seconds
    """
    time_range = range(int(start), int(end), DT_HR)

    indices_where_hr_zero = np.where(hr[:, 1] == 0)[0]
    indices_where_hr_nonzero = np.where(hr[:, 1] > 0)[0]
    hr[indices_where_hr_zero, 1] = 1  # Placeholder value; gets interpolated over later
    hr[:, 1] = 60.0 / hr[:, 1]  # bpm -> seconds per beat
    # Replace the placeholder entries by interpolating between valid
    # neighbours; note this interpolates in *index* space, not time space.
    hr[indices_where_hr_zero, 1] = np.interp(indices_where_hr_zero, indices_where_hr_nonzero,
                                             hr[indices_where_hr_nonzero, 1])

    hr_interp = np.interp(time_range, hr[:, 0], hr[:, 1])
    hr_interp = hr_interp / np.std(hr_interp)  # scale to unit standard deviation
    hr_interp = smooth_gauss(hr_interp, HR_SMOOTHING_WINDOW_SIZE)

    # Rebuild as two columns: uniform timestamps and the processed signal.
    hr = np.hstack((np.transpose([time_range]), np.transpose([hr_interp])))
    return hr
def get_motion_feature(sample_range, motion):
    """
    Gets motion feature, doing some smoothing of the data

    The activity counts are interpolated over the sample window and summed
    under a Gaussian weight centered on the window.

    Args:
        sample_range ([float]): Timestamps to interpolate motion over
        motion (np.array): Activity count data, first column is timestamps, second is counts
    Returns:
        np.array : single-element motion feature for input to classifiers
    """
    motion_bin = np.interp(sample_range, motion[:, 0], motion[:, 1])
    mu = len(motion_bin) / 2.0
    sigma = len(motion_bin) / 4.0
    # Gaussian-weighted sum, vectorized (the original accumulated the same
    # terms in a Python loop).
    indices = np.arange(len(motion_bin))
    weights = np.exp(-0.5 * (((indices - mu) / sigma) ** 2))
    convolution = np.sum(motion_bin * weights)
    return np.array([convolution])
def get_hr_feature(sample_range, hr):
    """
    Gets heart rate feature: the standard deviation of the heart rate
    interpolated over the sample window.

    Args:
        sample_range ([float]): Timestamps to interpolate heart rate over
        hr (np.array): Heart rate data, first column is timestamps, second is heart rate
    Returns:
        np.array : single-element heart rate feature, or [-1] when fewer
        than two samples fall inside the window (invalid)
    """
    heart_rate_in_range_condition = (np.array(hr[:, 0]) >= sample_range[0]) & (np.array(hr[:, 0]) <= sample_range[-1])
    time_points_in_range = np.extract(heart_rate_in_range_condition, hr[:, 0])
    heart_rate_in_range = np.extract(heart_rate_in_range_condition, hr[:, 1])

    if len(heart_rate_in_range) > 1:  # Checks to make sure heart rate is valid
        raw_hr = np.interp(sample_range, time_points_in_range, heart_rate_in_range)
        # A Gaussian-weighted convolution of the absolute HR derivative used
        # to be computed here as well, but its result was never returned
        # (see the commented-out line below); the dead loop was removed.
        # return np.array([convolution, np.std(raw_hr)])  # Old way of doing it.
        return np.array([np.std(raw_hr)])

    return np.array([-1])
def cosine_clock_proxy(time):
    """
    Cosine stand-in for the circadian clock.

    Args:
        time (float): Timestamp for epoch
    Returns:
        float : circadian model feature for input to classifiers, from a
        negated cosine with a 24 h period shifted by 5 h
    """
    shift_seconds = 5 * SECONDS_PER_HOUR  # sleep-drive phase shift
    period_seconds = SECONDS_PER_HOUR * HOURS_PER_DAY
    return -math.cos(2 * math.pi * (time - shift_seconds) / period_seconds)
def get_time_feature(time):
    """
    Gets homeostat prediction as a function of time over the course of the night

    Args:
        time (float): Timestamp for epoch
    Returns:
        np.array : single-element homeostat feature (exponential decay with a
        202.2-minute time constant)
    """
    decay_seconds = 202.2 * 60.0
    return np.array([math.exp(-time / decay_seconds)])
def get_clock_feature(time):
"""
Gets circadian clock model prediction from a cosine stand-in
Args:
time (float): Timestamp for epoch
Returns:
np.array | |
+= ' -o %s' % self._prop['output_file']
if self._prop['quiet']:
cmd += ' -v 0' # verbosity level 0 (warnings and errors only)
return cmd
    def _any2any(self, files, basename='tmp_easyviz_',
                 size=None, ofile_ext='.pnm'):
        """Convert a list of files to the file format specified in the
        ofile_ext keyword argument. Using either Netpbm tools or convert
        (from the ImageMagick package).

        Returns the list of converted file names (numbered from
        ``basename0001<ofile_ext>``); files whose conversion command
        fails are skipped with a warning.
        """
        # Map image extension -> (to-pnm converter, from-pnm converter).
        netpbm_converters = {'.png': ('pngtopnm', 'pnmtopng'),
                             '.gif': ('giftopnm', 'ppmtogif'),
                             '.jpg': ('jpegtopnm', 'pnmtojpeg'),
                             '.ps': ('pstopnm', 'pnmtops'),
                             '.eps': ('pstopnm', 'pnmtops'),
                             '.bmp': ('bmptopnm', 'ppmtobmp'),
                             '.tif': ('tifftopnm', 'pnmtotiff'),
                             '.tga': ('tgatopnm', 'ppmtotga'),
                             '.pnm': ('cat', ''),
                             }

        _check_type(files, 'files', (list,tuple))
        # All input files are assumed to share the first file's extension.
        ifile_ext = os.path.splitext(files[0])[1]
        anytopnm = netpbm_converters[ifile_ext][0]
        pnmtoany = netpbm_converters[ofile_ext][1]
        pnmscale = 'pnmscale'
        #pnmcrop = 'pnmcrop'
        convert = 'convert'

        # Pick the converter program.  ImageMagick's convert wins when it is
        # preferred (or when the Netpbm pair is incomplete); otherwise the
        # Netpbm pipeline is used.
        app = anytopnm
        if findprograms((convert, anytopnm, pnmtoany)):
            if self._prop['preferred_package'].lower() == 'imagemagick':
                app = convert
        elif findprograms(convert):
            app = convert
        elif not findprograms((anytopnm, pnmtoany)):
            raise Exception("Neither %s nor %s was found" % (convert,anytopnm))

        quiet = self._prop['quiet']
        new_files = []
        i = 1 # counter
        for file_ in files:
            new_file = "%s%04d%s" % (basename, i, ofile_ext)
            if app == anytopnm:
                # Netpbm: build a shell pipeline
                # anytopnm [| pnmscale] [| pnmtoany] > new_file.
                options = ''
                if quiet and app != 'cat':
                    options += '-quiet'
                if app == 'pstopnm':
                    options += ' -stdout'
                    #options += ' -portrait'
                cmd = "%(app)s %(options)s %(file_)s " % vars()
                if size is not None and findprograms(pnmscale):
                    w, h = size
                    cmd += "| %(pnmscale)s -width %(w)s -height %(h)s" % vars()
                if pnmtoany != '':
                    options = ''
                    if quiet:
                        options += '-quiet'
                    if pnmtoany == 'pnmtojpeg':
                        options += ' -quality 100' # don't lose quality
                    cmd += " | %(pnmtoany)s %(options)s" % vars()
                cmd += " > %s" % new_file
            else:
                # ImageMagick: a single convert command (with optional resize).
                options = ''
                if size is not None:
                    options += '-resize %sx%s' % size
                cmd = "%(app)s %(options)s %(file_)s %(new_file)s" % vars()
            if not quiet:
                print cmd
            failure = os.system(cmd)
            if failure:
                # Best-effort: skip files whose conversion failed.
                print "... %s failed, jumping to next file..." % app
                continue
            new_files.append(new_file)
            if not quiet:
                apps = app
                if app != convert and pnmtoany != '':
                    apps += ' and %s' % pnmtoany
                print "%s transformed via %s to %s (%d Kb)" % \
                      (file_,apps,new_file,int(os.path.getsize(new_file)/1000))
            i += 1

        return new_files
def _get_aspect_ratio(self):
"""Parse and return the aspect ratio."""
# accept aspect ratio on the form 4:3, 4/3, or 1.3333
aspect = self._prop['aspect']
if isinstance(aspect, str):
if aspect.find(':') > 0:
aspect = aspect.split(':')
elif aspect.find('/'):
aspect = aspect.split('/')
else:
try: aspect = float(aspect)
except: aspect = None
try: aspect = float(aspect[0]) / float(aspect[1])
except: aspect = None
return aspect
def _get_size(self):
"""Parse and return the size."""
legal_sizes = {'sqcif': (128, 96),
'qcif': (176, 144),
'cif': (352, 288),
'4cif': (704, 576)}
size = self._prop['size']
if isinstance(size, str):
if size in legal_sizes:
size = legal_sizes[size]
else:
size = size.split('x') # wxh
if not (isinstance(size, (tuple,list)) and len(size) == 2):
size = None
return size
def html_movie(plotfiles, interval_ms=300, width=800, height=600,
               casename=None):
    """
    Takes a list plotfiles, such as::

        'frame00.png', 'frame01.png', ...

    and creates javascript code for animating the frames as a movie in HTML.
    The `plotfiles` argument can be of three types:

      * A Python list of the names of the image files, sorted in correct
        order. The names can be filenames of files reachable by the
        HTML code, or the names can be URLs.

      * A filename generator using Unix wildcard notation, e.g.,
        ``frame*.png`` (the files most be accessible for the HTML code).

      * A filename generator using printf notation for frame numbering
        and limits for the numbers. An example is ``frame%0d.png:0->92``,
        which means ``frame00.png``, ``frame01.png``, ..., ``frame92.png``.
        This specification of `plotfiles` also allows URLs, e.g.,
        ``http://mysite.net/files/frames/frame_%04d.png:0->320``.

    If `casename` is None, a casename based on the full relative path of the
    first plotfile is used as tag in the variables in the javascript code
    such that the code for several movies can appear in the same file
    (i.e., the various code blocks employ different variables because
    the variable names differ).

    The returned result is text strings that incorporate javascript to
    loop through the plots one after another. The html text also features
    buttons for controlling the movie.
    The parameter `interval_ms` is the time interval between loading
    successive images and is in milliseconds.

    The `width` and `height` parameters do not seem to have any effect
    for reasons not understood.

    The following strings are returned: header, javascript code, form
    with movie and buttons, footer, and plotfiles::

       header, jscode, form, footer, plotfiles = html_movie('frames*.png')
       # Insert javascript code in some HTML file
       htmlfile.write(jscode + form)
       # Or write a new standalone file that act as movie player
       filename = plotfiles[0][:-4] + '.html'
       htmlfile = open(filename, 'w')
       htmlfile.write(header + jscode + form + footer)
       htmlfile.close()

    This function is based on code written by <NAME>, based on
    a template from Alan McIntyre.
    """
    # Alternative method:
    # http://stackoverflow.com/questions/9486961/animated-image-with-javascript

    # Start with expanding plotfiles if it is a filename generator
    if not isinstance(plotfiles, (tuple,list)):
        if not isinstance(plotfiles, (str,unicode)):
            raise TypeError('plotfiles must be list or filename generator, not %s' % type(plotfiles))

        filename_generator = plotfiles
        if '*' in filename_generator:
            # frame_*.png
            if filename_generator.startswith('http'):
                raise ValueError('Filename generator %s cannot contain *; must be like http://some.net/files/frame_%%04d.png:0->120' % filename_generator)

            plotfiles = glob.glob(filename_generator)
            if not plotfiles:
                # Fix: the original format string had no %s placeholder and
                # raised TypeError instead of this ValueError.
                raise ValueError('No plotfiles on the form %s' %
                                 filename_generator)
            plotfiles.sort()
        elif '->' in filename_generator:
            # frame_%04d.png:0->120
            # http://some.net/files/frame_%04d.png:0->120
            p = filename_generator.split(':')
            filename = ':'.join(p[:-1])
            if not re.search(r'%0?\d+', filename):
                raise ValueError('Filename generator %s has wrong syntax; missing printf specification as in frame_%%04d.png:0->120' % filename_generator)
            if not re.search(r'\d+->\d+', p[-1]):
                raise ValueError('Filename generator %s has wrong syntax; must be like frame_%%04d.png:0->120' % filename_generator)
            p = p[-1].split('->')
            lo, hi = int(p[0]), int(p[1])
            plotfiles = [filename % i for i in range(lo,hi+1,1)]

    # Check that the plot files really exist, if they are local on the computer
    if not plotfiles[0].startswith('http'):
        missing_files = [fname for fname in plotfiles
                         if not os.path.isfile(fname)]
        if missing_files:
            raise ValueError('Missing plot files: %s' %
                             str(missing_files)[1:-1])

    if casename is None:
        # Use plotfiles[0] as the casename, but remove illegal
        # characters in variable names since the casename will be
        # used as part of javascript variable names.
        casename = os.path.splitext(plotfiles[0])[0]
        # Use _ for invalid characters
        casename = re.sub('[^0-9a-zA-Z_]', '_', casename)
        # Remove leading illegal characters until we find a letter or underscore
        casename = re.sub('^[^a-zA-Z_]+', '', casename)

    filestem, ext = os.path.splitext(plotfiles[0])
    # Fix: the original compared against 'gif' (no dot), which splitext
    # never produces, so legal .gif files were rejected.
    if ext not in ('.png', '.jpg', '.jpeg', '.gif'):
        raise ValueError('Plotfiles (%s, ...) must be PNG, JPEG, or GIF files with '\
                         'extension .png, .jpg/.jpeg, or .gif' % plotfiles[0])

    header = """\
<html>
<head>
</head>
<body>
"""
    no_images = len(plotfiles)
    jscode = """
<script language="Javascript">
<!---
var num_images_%(casename)s = %(no_images)d;
var img_width_%(casename)s = %(width)d;
var img_height_%(casename)s = %(height)d;
var interval_%(casename)s = %(interval_ms)d;
var images_%(casename)s = new Array();

function preload_images_%(casename)s()
{
   t = document.getElementById("progress");
""" % vars()

    i = 0
    for fname in plotfiles:
        jscode += """
   t.innerHTML = "Preloading image ";
   images_%(casename)s[%(i)s] = new Image(img_width_%(casename)s, img_height_%(casename)s);
   images_%(casename)s[%(i)s].src = "%(fname)s";
        """ % vars()
        i = i+1
    jscode += """
   t.innerHTML = "";
}

function tick_%(casename)s()
{
   if (frame_%(casename)s > num_images_%(casename)s - 1)
       frame_%(casename)s = 0;

   document.name_%(casename)s.src = images_%(casename)s[frame_%(casename)s].src;
   frame_%(casename)s += 1;
   tt = setTimeout("tick_%(casename)s()", interval_%(casename)s);
}

function startup_%(casename)s()
{
   preload_images_%(casename)s();
   frame_%(casename)s = 0;
   setTimeout("tick_%(casename)s()", interval_%(casename)s);
}

function stopit_%(casename)s()
{ clearTimeout(tt); }

function restart_%(casename)s()
{ tt = setTimeout("tick_%(casename)s()", interval_%(casename)s); }

function slower_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s/0.7; }

function faster_%(casename)s()
{ interval_%(casename)s = interval_%(casename)s*0.7; }

// --->
</script>
""" % vars()
    plotfile0 = plotfiles[0]
    form = """
<form>
&nbsp;
<input type="button" value="Start movie" onClick="startup_%(casename)s()">
<input type="button" value="Pause movie" onClick="stopit_%(casename)s()">
<input type="button" value="Restart movie" onClick="restart_%(casename)s()">
&nbsp;
<input type="button" value="Slower" onClick="slower_%(casename)s()">
<input type="button" value="Faster" onClick="faster_%(casename)s()">
</form>

<p><div ID="progress"></div></p>
<img src="%(plotfile0)s" name="name_%(casename)s" border=2/>
""" % vars()
    footer = '\n</body>\n</html>\n'
    return header, jscode, form, footer, plotfiles
def movie(input_files, **kwargs):
"""
Make a movie from a series of image files.
This function makes it very easy to create a movie file from a
series of image files. Several different encoding tools can be
used, such as an HTML file, MEncoder, ffmpeg, mpeg_encode,
ppmtompeg (Netpbm), mpeg2enc (MJPEGTools), and convert (ImageMagick),
to combine the image files together. The encoding tool will be chosen
automatically among these if more than one is installed on the
machine in question (unless | |
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import shutil
import time
import numpy
# import psutil
import csv
import pandas
import gdal
import ogr
import osgeo
from subprocess import call
from numpy import maximum, minimum, where, isnan, exp, median, full
from numpy import maximum, minimum, where, isnan, exp, median, nonzero, random, log, zeros_like
from app.paths import paths, PathsNotSetExecption
from recharge import MM
from recharge.dict_setup import initialize_master_dict, initialize_static_dict, initialize_initial_conditions_dict, \
set_constants, initialize_master_tracker
from recharge.dynamic_raster_finder import get_penman, get_individ_kcb, get_kcb, get_prisms
from recharge.raster_manager import RasterManager
from recharge.tools import millimeter_to_acreft, unique_path, add_extension, time_it, day_generator
from scipy.stats import norm
from recharge.raster_tools import convert_raster_to_array
from recharge.dict_setup import initialize_point_tracker
from recharge.raster_tools import apply_mask, apply_mask_pixel_tracker
from utils.tracker_plot import run_tracker_plot
class NotConfiguredError(Exception):
    """Raised when Processes.run() is invoked before Processes.configure_run().

    Derives from Exception (not BaseException) so that generic
    ``except Exception`` handlers and test harnesses catch it; existing
    ``except NotConfiguredError`` callers are unaffected.
    """

    def __str__(self):
        return 'The model has not been configured. Processes.configure_run must be called before Processes.run'
class Processes(object):
    """
    The purpose of this class is update the etrm master dict daily.
    See function explanations.
    """
    # Config values. Default values should be specified in RunSpec not here.
    # All of these are populated by configure_run(); None until then.
    _date_range = None
    _use_individual_kcb = None
    _ro_reinf_frac = None
    _swb_mode = None
    _rew_ceff = None
    _evap_ceff = None
    _winter_evap_limiter = None
    _winter_end_day = None
    _winter_start_day = None
    # Guard flag checked by run(); set True at the end of configure_run().
    _is_configured = False
def __init__(self, cfg, taw=None):
    """Build static inputs, the master dict and the raster manager from *cfg*.

    :param cfg: model configuration object (project type) providing paths,
        plotting options and optional overrides such as ``pvalue``.
    :param taw: optional TAW override forwarded to the RasterManager.
    :raises PathsNotSetExecption: if the global ``paths`` object is unset.
    """
    # JIR
    # self.tracker = None
    self.taw = taw
    self.point_tracker = None  # created lazily on the first day of run()
    self._initial_depletions = None
    if not paths.is_set():
        raise PathsNotSetExecption()
    self._cfg = cfg
    # set global mask and polygons paths
    paths.set_polygons_path(cfg.polygons)
    if cfg.mask:
        paths.set_mask_path(cfg.mask)
    if cfg.binary_shapefile:
        paths.set_point_shape_path(cfg.binary_shapefile)
    if cfg.use_verify_paths:
        paths.verify()
    print '##############################', paths.mask, cfg.mask
    # Define user-controlled constants, these are constants to start with day one, replace
    # with spin-up data when multiple years are covered
    self._info('Constructing/Initializing Processes')
    self._constants = set_constants()
    print 'default p value {}'.format(self._constants['p'])
    if cfg.pvalue is not None:
        # config may override the default depletion fraction 'p'
        self._constants['p'] = cfg.pvalue
        print 'this is the p value from the config {}'.format(self._constants['p'])
    # Initialize point and raster dicts for static values (e.g. TAW) and initial conditions (e.g. de)
    # from spin up. Define shape of domain. Create a month and annual dict for output raster variables
    # as defined in self._outputs. Don't initialize point_tracker until a time step has passed #TODO - point_tracker?
    self._static = initialize_static_dict(cfg.static_pairs)
    # JIR
    # not necessary
    # self._initial = initialize_initial_conditions_dict(cfg.initial_pairs)
    self.xplot = self._cfg.xplot
    self.yplot = self._cfg.yplot
    self.plot_output = self._cfg.plot_output
    shape = self._static['taw'].shape  # Here's where the shape of the grid is determined GELP
    self._master = initialize_master_dict(shape)
    self._raster_manager = RasterManager(cfg, self.taw)
    self.initialize()
def configure_run(self, runspec):
    """
    configure the model run with a RunSpec object
    :param runspec: RunSpec
    :return:
    """
    self._info('Configuring Processes')
    # Optional TAW adjustments are applied in order; a uniform TAW (if set)
    # overwrites whatever modify_taw produced.
    if runspec.save_dates:
        self.set_save_dates(runspec.save_dates)
    if runspec.taw_modification is not None:
        self.modify_taw(runspec.taw_modification)
    if runspec.uniform_taw is not None:
        self.uniform_taw(runspec.uniform_taw)
    # Copy scalar run options from the RunSpec onto the instance.
    self._date_range = runspec.date_range
    self._use_individual_kcb = runspec.use_individual_kcb
    self._ro_reinf_frac = runspec.ro_reinf_frac
    self._swb_mode = runspec.swb_mode
    self._rew_ceff = runspec.rew_ceff
    self._evap_ceff = runspec.evap_ceff
    self._winter_evap_limiter = runspec.winter_evap_limiter
    self._winter_end_day = runspec.winter_end_day
    self._winter_start_day = runspec.winter_start_day
    # Echo the effective configuration (Python 2 print statements).
    print '---------- CONFIGURATION ---------------'
    for attr in ('date_range', 'use_individual_kcb',
                 'winter_evap_limiter', 'winter_end_day', 'winter_start_day',
                 'ro_reinf_frac', 'swb_mode', 'rew_ceff', 'evap_ceff'):
        print '{:<20s}{}'.format(attr, getattr(self, '_{}'.format(attr)))
    print '----------------------------------------'
    # run() refuses to start until this flag is set.
    self._is_configured = True
def run(self):
    """
    run the ETRM model

    Iterates day_generator over the configured date range; each day loads
    rasters, optionally corrects precipitation, optionally draws a stochastic
    rain intensity (Walnut Gulch mode), then runs the daily process chain
    (snow, transpiration, cover fraction, soil water balance, mass balance,
    accumulations) and updates the raster/point/master trackers.
    :raises NotConfiguredError: if configure_run() was not called first.
    :return:
    """
    if not self._is_configured:
        raise NotConfiguredError()
    self._info('Run started. Simulation period: start={}, end={}'.format(*self._date_range))
    # Short aliases for the hot loop.
    c = self._constants
    m = self._master
    s = self._static
    rm = self._raster_manager
    # Monsoon window as day-of-year bounds.
    start_monsoon, end_monsoon = c['s_mon'].timetuple().tm_yday, c['e_mon'].timetuple().tm_yday
    self._info('Monsoon: {} to {}'.format(start_monsoon, end_monsoon))
    # big_counter = 0
    st = time.time()
    if self.point_tracker is None:
        # to avoid a memory leak, at this stage we write to a file.
        # initialize_point_tracker(m, point_arr)
        # self.point_tracker = True
        if paths.point_shape and os.path.isfile(paths.point_shape):
            point_arr = self._pixels_of_interest_to_array(paths.point_shape)
            self.point_tracker = initialize_point_tracker(m, point_arr)
    if self._cfg.use_walnut_gulch_ro:
        # set random seed value in configuration file
        random.seed(self._cfg.seed)
    for counter, day in enumerate(day_generator(*self._date_range)):
        tm_yday = day.timetuple().tm_yday
        self._info('DAY: {}({})'.format(day, tm_yday))
        time_it(self._do_daily_raster_load, day)
        # a, b are the linear precip-correction coefficients; None = no correction.
        a, b = None, None
        if self._cfg.use_monsoon_precip_correction:
            # # modify the PRISM precipitation - Commented out by GELP 4/27/2019
            if start_monsoon <= tm_yday <= end_monsoon:
                a = 1.612722
                b = 0.676904
            else:
                a = 0.488870
                b = 0.993831
        elif self._cfg.use_mountain_precip_correction:
            if start_monsoon <= tm_yday <= end_monsoon:
                a = 0.3276
                b = 0.7829
            else:
                # todo: is this appropriate for the mountains non monsoon
                a = 0.4888870
                b = 0.993831
        if a is not None:
            # inverse linear correction, clamped at zero
            m['precip'] = maximum((m['precip'] - a)/b, 0)
        m['inten'] = zeros_like(m['precip'])
        if self._cfg.use_walnut_gulch_ro:
            # generate random number
            random_number = random.randn()
            percentile = norm.cdf(random_number)
            # stochastic estimate of runoff based on field data from Walnut Gulch, AZ (see Xu thesis, 2018)
            log_precip = log(m['precip'][m['precip'] > 0])
            log_inten = zeros_like(log_precip)
            # Intensity drawn per precip-magnitude bin; monsoon and
            # non-monsoon seasons use different fitted (mu, sigma) pairs.
            if start_monsoon <= tm_yday <= end_monsoon:
                log_inten[log_precip < 0] = norm.ppf(percentile, -2.43, 1.21)
                log_inten[(log_precip >= 0) & (log_precip < 0.5)] = norm.ppf(percentile, -2.89, 1.06)
                log_inten[(log_precip >= 0.5) & (log_precip < 1)] = norm.ppf(percentile, -2.83, 0.96)
                log_inten[(log_precip >= 1) & (log_precip < 1.5)] = norm.ppf(percentile, -2.57, 0.95)
                log_inten[(log_precip >= 1.5) & (log_precip < 2)] = norm.ppf(percentile, -2.44, 0.90)
                log_inten[(log_precip >= 2) & (log_precip < 2.5)] = norm.ppf(percentile, -2.31, 0.89)
                log_inten[(log_precip >= 2.5) & (log_precip < 3)] = norm.ppf(percentile, -2.18, 0.84)
                log_inten[(log_precip >= 3) & (log_precip < 3.5)] = norm.ppf(percentile, -1.85, 0.77)
                log_inten[log_precip >= 3.5] = norm.ppf(percentile, -1.86, 0.73)
            else:
                log_inten[log_precip < 0] = norm.ppf(percentile, -2.77, 1.33)
                log_inten[(log_precip >= 0) & (log_precip < 0.5)] = norm.ppf(percentile, -3.48, 0.91)
                log_inten[(log_precip >= 0.5) & (log_precip < 1)] = norm.ppf(percentile, -3.47, 0.77)
                log_inten[(log_precip >= 1) & (log_precip < 1.5)] = norm.ppf(percentile, -3.37, 0.70)
                log_inten[(log_precip >= 1.5) & (log_precip < 2)] = norm.ppf(percentile, -3.38, 0.61)
                log_inten[(log_precip >= 2) & (log_precip < 2.5)] = norm.ppf(percentile, -3.28, 0.65)
                log_inten[(log_precip >= 2.5) & (log_precip < 3)] = norm.ppf(percentile, -3.08, 0.56)
                log_inten[(log_precip >= 3) & (log_precip < 3.5)] = norm.ppf(percentile, -3.12, 0.45)
                log_inten[log_precip >= 3.5] = norm.ppf(percentile, -3.13, 0.66)
            m['inten'][m['precip'] > 0] = exp(log_inten)  # mm/min
        # Assume 2-hour storms in the monsoon season, and 6 hour storms otherwise
        # If melt is occurring (calculated in _do_snow), infiltration will be set to 24 hours
        # [mm/day] #
        m['soil_ksat'] = s['soil_ksat']
        # ksat-runoff version
        # if start_monsoon <= tm_yday <= end_monsoon:
        #     m['soil_ksat'] = s['soil_ksat'] * 2 / 24.
        # else:
        #     m['soil_ksat'] = s['soil_ksat'] * 6 / 24.
        time_it(self._do_snow, m, c)
        # time_it(self._do_soil_ksat_adjustment, m, s)  # forest litter adjustment is hard to justify
        print 'Plant stress threshold p {} '.format(c['p'])
        time_it(self._do_dual_crop_transpiration, tm_yday, m, s, c)
        time_it(self._do_fraction_covered, m, s, c)
        # if self._swb_mode == 'fao':
        #     time_it(self._do_fao_soil_water_balance, m, s, c)
        # elif self._swb_mode == 'vertical':
        #     time_it(self._do_vert_soil_water_balance, m, s, c)
        # Dispatch to the configured soil-water-balance implementation.
        func = self._do_fao_soil_water_balance if self._swb_mode == 'fao' else self._do_vert_soil_water_balance
        time_it(func, m, s, c, tm_yday)
        time_it(self._do_mass_balance, day, swb=self._swb_mode)
        time_it(self._do_accumulations)
        time_it(rm.update_raster_obj, m, day)
        is_first = counter == 0
        time_it(self._update_master_tracker, m, day, is_first)
        self._update_point_tracker(m, day, is_first)
    self._info('saving tabulated data')
    time_it(rm.save_csv)
    if paths.mask is not None:
        self.save_mask()
    # self.save_tracker() #TODO check gabe merge
    self._info('Execution time: {}'.format(time.time() - st))
def set_save_dates(self, dates):
    """Forward the list of days whose rasters should be written to disk.

    :param dates: list of datetimes
    :return: None
    """
    manager = self._raster_manager
    manager.set_save_dates(dates)
def modify_master(self, alpha=1, beta=1, gamma=1, zeta=1, theta=1):
    """Perturb the master dict in place for sensitivity runs.

    :param alpha: temp offset (added)
    :param beta: precip scalar
    :param gamma: etrs scalar
    :param zeta: kcb scalar
    :param theta: soil_ksat scalar
    :return: None
    """
    master = self._master
    # temp is shifted additively; the remaining fields are scaled in place.
    master['temp'] += alpha
    for key, factor in (('precip', beta), ('etrs', gamma),
                        ('kcb', zeta), ('soil_ksat', theta)):
        master[key] *= factor
def modify_taw(self, taw_modification):
    """Scale the static TAW grid by a constant factor and return it.

    :param taw_modification: scalar multiplier applied to the TAW array
    :return: the scaled TAW array (also stored back into the static dict)
    """
    statics = self._static
    scaled_taw = statics['taw'] * taw_modification
    statics['taw'] = scaled_taw
    return scaled_taw
def uniform_taw(self, taw_value):
    """Replaces the taw array with a single value
    :param taw_value: object
    :return taw_uniform array
    """
    print '===========================\nrunning uniform_taw\n==========================='
    m = self._master  # testing 6/2/17
    s = self._static
    taw = s['taw']
    taw_shape = taw.shape
    # Fill the whole domain with one TAW value, preserving the grid shape.
    s['taw'] = numpy.full(taw_shape, taw_value)
    taw = s['taw']
    # NOTE(review): this makes m['pdr'], m['dr'] and s['taw'] all reference
    # the SAME array object -- later in-place edits to one affect the others.
    # Confirm the aliasing is intentional.
    m['pdr'] = m['dr'] = taw
    return taw
def get_taw(self):
"""
Gets | |
<gh_stars>0
'''
----------------------------------------------------------------------------
This file is part of the Sanworks Pulse Pal repository
Copyright (C) 2016 Sanworks LLC, Sound Beach, New York, USA
----------------------------------------------------------------------------
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3.
This program is distributed WITHOUT ANY WARRANTY and without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import struct
import math
class PulsePalObject(object):
def __init__(self):
self.serialObject = 0
self.OpMenuByte = 213
self.firmwareVersion = 0
self.model = 0
self.dac_bitMax = 0
self.cycleFrequency = 20000
self.isBiphasic = [float('nan'), 0, 0, 0, 0]
self.phase1Voltage = [float('nan'), 5, 5, 5, 5]
self.phase2Voltage = [float('nan'), -5, -5, -5, -5]
self.restingVoltage = [float('nan'), 0, 0, 0, 0]
self.phase1Duration = [float('nan'), 0.001, 0.001, 0.001, 0.001]
self.interPhaseInterval = [float('nan'), 0.001, 0.001, 0.001, 0.001]
self.phase2Duration = [float('nan'), 0.001, 0.001, 0.001, 0.001]
self.interPulseInterval = [float('nan'), 0.01, 0.01, 0.01, 0.01]
self.burstDuration = [float('nan'), 0, 0, 0, 0]
self.interBurstInterval = [float('nan'), 0, 0, 0, 0]
self.pulseTrainDuration = [float('nan'), 1, 1, 1, 1]
self.pulseTrainDelay = [float('nan'), 0, 0, 0, 0]
self.linkTriggerChannel1 = [float('nan'), 1, 1, 1, 1]
self.linkTriggerChannel2 = [float('nan'), 0, 0, 0, 0]
self.customTrainID = [float('nan'), 0, 0, 0, 0]
self.customTrainTarget = [float('nan'), 0, 0, 0, 0]
self.customTrainLoop = [float('nan'), 0, 0, 0, 0]
self.triggerMode = [float('nan'), 0, 0]
self.outputParameterNames = ['isBiphasic', 'phase1Voltage', 'phase2Voltage', 'phase1Duration',
'interPhaseInterval',
'phase2Duration', 'interPulseInterval', 'burstDuration', 'interBurstInterval',
'pulseTrainDuration',
'pulseTrainDelay', 'linkTriggerChannel1', 'linkTriggerChannel2', 'customTrainID',
'customTrainTarget', 'customTrainLoop', 'restingVoltage']
self.triggerParameterNames = ['triggerMode']
def connect(self, serialPortName):
    """Open *serialPortName* at 115200 baud and handshake with the Pulse Pal.

    Sets ``firmwareVersion``, ``model`` (1 or 2) and ``dac_bitMax`` from the
    5-byte handshake response.
    :raises AssertionError: if no response arrives within the 1 s timeout.
        (NOTE(review): assert is stripped under ``python -O``; consider a
        real exception.)
    """
    import serial  # local import so the module loads without pyserial installed
    self.serialObject = serial.Serial(serialPortName, 115200, timeout=1)
    # Handshake: op-menu byte followed by opcode 72.
    handshakeByteString = struct.pack('BB', self.OpMenuByte, 72)
    self.serialObject.write(handshakeByteString)
    Response = self.serialObject.read(5)
    assert len(Response) > 0, f'No PulsePal found on {serialPortName}! Make sure PulsePal is connected'
    # Bytes 1-4 of the response are the firmware version (little-endian uint32).
    fvBytes = Response[1:5]
    self.firmwareVersion = struct.unpack('<I', fvBytes)[0]
    if self.firmwareVersion < 20:
        # Original Pulse Pal: 8-bit DAC.
        self.model = 1
        self.dac_bitMax = 255
    else:
        # Pulse Pal 2: 16-bit DAC.
        self.model = 2
        self.dac_bitMax = 65535
    if self.firmwareVersion == 20:
        print(
            "Notice: NOTE: A firmware update is available. It fixes a bug in Pulse Gated trigger mode when used with multiple inputs.")
        print("To update, follow the instructions at https://sites.google.com/site/pulsepalwiki/updating-firmware")
    # Identify this client API to the device.
    self.serialObject.write(str.encode('YPYTHON'))
def disconnect(self):
terminateByteString = struct.pack('BB', self.OpMenuByte, 81)
self.serialObject.write(terminateByteString)
self.serialObject.close()
def programOutputChannelParam(self, paramName, channel, value):
    """Program one output-channel parameter on the device and mirror it locally.

    :param paramName: parameter name (str) or its 1-based parameter code (int)
    :param channel: output channel number (1-4)
    :param value: parameter value in user units (volts, seconds, or flag)
    :raises PulsePalError: if the device does not send an acknowledgement byte.
    """
    originalValue = value  # keep user units; 'value' below may become DAC bits
    if isinstance(paramName, str):
        # Resolve a string name to its 1-based serial parameter code.
        paramCode = self.outputParameterNames.index(paramName) + 1
    else:
        paramCode = paramName
    if 2 <= paramCode <= 3:
        # Phase voltages: map -10..+10 V onto the DAC bit range.
        value = math.ceil(((value + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
        if self.model == 1:
            programByteString = struct.pack('BBBBB', self.OpMenuByte, 74, paramCode, channel, value)
        else:
            # Model 2 has a 16-bit DAC, so the value takes two bytes.
            programByteString = struct.pack('<BBBBH', self.OpMenuByte, 74, paramCode, channel, value)
    elif paramCode == 17:
        # restingVoltage: same volts-to-bits conversion as the phase voltages.
        value = math.ceil(((value + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
        if self.model == 1:
            programByteString = struct.pack('BBBBB', self.OpMenuByte, 74, paramCode, channel, value)
        else:
            programByteString = struct.pack('<BBBBH', self.OpMenuByte, 74, paramCode, channel, value)
    elif 4 <= paramCode <= 11:
        # Timing parameters: seconds -> hardware cycles, 32-bit little-endian.
        programByteString = struct.pack('<BBBBL', self.OpMenuByte, 74, paramCode, channel,
                                        int(value * self.cycleFrequency))
    else:
        # Remaining parameters are single bytes sent as-is.
        programByteString = struct.pack('BBBBB', self.OpMenuByte, 74, paramCode, channel, value)
    self.serialObject.write(programByteString)
    # Receive acknowledgement
    ok = self.serialObject.read(1)
    if len(ok) == 0:
        raise PulsePalError(
            'Error: Pulse Pal did not return an acknowledgement byte after a call to programOutputChannelParam.')
    # Update the PulsePal object's parameter fields
    if paramCode == 1:
        self.isBiphasic[channel] = originalValue
    elif paramCode == 2:
        self.phase1Voltage[channel] = originalValue
    elif paramCode == 3:
        self.phase2Voltage[channel] = originalValue
    elif paramCode == 4:
        self.phase1Duration[channel] = originalValue
    elif paramCode == 5:
        self.interPhaseInterval[channel] = originalValue
    elif paramCode == 6:
        self.phase2Duration[channel] = originalValue
    elif paramCode == 7:
        self.interPulseInterval[channel] = originalValue
    elif paramCode == 8:
        self.burstDuration[channel] = originalValue
    elif paramCode == 9:
        self.interBurstInterval[channel] = originalValue
    elif paramCode == 10:
        self.pulseTrainDuration[channel] = originalValue
    elif paramCode == 11:
        self.pulseTrainDelay[channel] = originalValue
    elif paramCode == 12:
        self.linkTriggerChannel1[channel] = originalValue
    elif paramCode == 13:
        self.linkTriggerChannel2[channel] = originalValue
    elif paramCode == 14:
        self.customTrainID[channel] = originalValue
    elif paramCode == 15:
        self.customTrainTarget[channel] = originalValue
    elif paramCode == 16:
        self.customTrainLoop[channel] = originalValue
    elif paramCode == 17:
        self.restingVoltage[channel] = originalValue
def programTriggerChannelParam(self, paramName, channel, value):
originalValue = value
if isinstance(paramName, str):
paramCode = self.triggerParameterNames.index(paramName) + 1
else:
paramCode = paramName
messageBytes = struct.pack('BBBBB', self.OpMenuByte, 74, paramCode, channel, value)
self.serialObject.write(messageBytes)
# Receive acknowledgement
ok = self.serialObject.read(1)
if len(ok) == 0:
raise PulsePalError(
'Error: Pulse Pal did not return an acknowledgement byte after a call to programTriggerChannelParam.')
if paramCode == 1:
self.triggerMode[channel] = originalValue
def syncAllParams(self):
    """Push every cached output/trigger parameter to the device in one message.

    Builds the opcode-73 byte string in the exact order the firmware expects:
    32-bit times, then (model 2 only) 16-bit voltages, then 8-bit params,
    then trigger-link bytes, then the two trigger modes.
    :raises PulsePalError: if the device does not send an acknowledgement byte.
    """
    # First make a list data-type with all param values in an iteration of the loop.
    # Then pack them by data-type and append to string with + operation
    programByteString = struct.pack('BB', self.OpMenuByte, 73)
    # Add 32-bit time params
    programValues = [0] * 32
    pos = 0
    for i in range(1, 5):
        # Seconds -> hardware cycles for all eight timing params per channel.
        programValues[pos] = self.phase1Duration[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.interPhaseInterval[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.phase2Duration[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.interPulseInterval[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.burstDuration[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.interBurstInterval[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.pulseTrainDuration[i] * self.cycleFrequency
        pos += 1
        programValues[pos] = self.pulseTrainDelay[i] * self.cycleFrequency
        pos += 1
    # Pack 32-bit times to bytes and append to program byte-string
    programByteString = programByteString + struct.pack('<LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', *programValues)
    # Add 16-bit voltages
    if self.model == 2:
        programValues = [0] * 12
        pos = 0
        for i in range(1, 5):
            value = math.ceil(((self.phase1Voltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
            value = math.ceil(((self.phase2Voltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
            value = math.ceil(
                ((self.restingVoltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
        programByteString = programByteString + struct.pack('<HHHHHHHHHHHH', *programValues)
    # Add 8-bit params
    if self.model == 1:
        # Model 1 sends its 8-bit DAC voltages interleaved with the flags.
        programValues = [0] * 28
    else:
        programValues = [0] * 16
    pos = 0
    for i in range(1, 5):
        programValues[pos] = self.isBiphasic[i]
        pos += 1
        if self.model == 1:
            value = math.ceil(((self.phase1Voltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
            value = math.ceil(((self.phase2Voltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
        programValues[pos] = self.customTrainID[i]
        pos += 1
        programValues[pos] = self.customTrainTarget[i]
        pos += 1
        programValues[pos] = self.customTrainLoop[i]
        pos += 1
        if self.model == 1:
            value = math.ceil(
                ((self.restingVoltage[i] + 10) / float(20)) * self.dac_bitMax)  # Convert volts to bits
            programValues[pos] = value
            pos += 1
    # Pack 8-bit params to bytes and append to program byte-string
    if self.model == 1:
        programByteString = programByteString + struct.pack('BBBBBBBBBBBBBBBBBBBBBBBBBBBB', *programValues)
    else:
        programByteString = programByteString + struct.pack('BBBBBBBBBBBBBBBB', *programValues)
    # Add trigger channel link params
    programValues = [0] * 8
    pos = 0
    for i in range(1, 5):
        programValues[pos] = self.linkTriggerChannel1[i]
        pos += 1
    for i in range(1, 5):
        programValues[pos] = self.linkTriggerChannel2[i]
        pos += 1
    # Pack 8-bit params to bytes and append to program byte-string
    programByteString = programByteString + struct.pack('BBBBBBBB', *programValues)
    # Add trigger mode params
    programByteString = programByteString + struct.pack('BB', self.triggerMode[1], self.triggerMode[2])
    # Send program byte string to PulsePal
    self.serialObject.write(programByteString)
    # Receive acknowledgement
    ok = self.serialObject.read(1)
    if len(ok) == 0:
        raise PulsePalError(
            'Error: Pulse Pal did not return an acknowledgement byte after a call to syncAllParams.')
def sendCustomPulseTrain(self, customTrainID, pulseTimes, pulseVoltages):
nPulses = len(pulseTimes)
for i in range(0, nPulses):
pulseTimes[i] = pulseTimes[i] * self.cycleFrequency # Convert seconds to multiples of minimum cycle (100us)
pulseVoltages[i] = math.ceil(
((pulseVoltages[i] + 10) / float(20)) * self.dac_bitMax) # Convert volts to bytes
if customTrainID == 1:
messageBytes = struct.pack('BB', self.OpMenuByte, 75) # Op code for programming train 1
else:
messageBytes = struct.pack('BB', self.OpMenuByte, 76) # Op code for programming train 2
if self.model == 1:
messageBytes = messageBytes + struct.pack('<BL', 0,
nPulses) # 0 is the USB packet correction byte. See PulsePal wiki
else:
messageBytes = messageBytes + struct.pack('<L', nPulses)
messageBytes = messageBytes + struct.pack(('<' + 'L' * nPulses), *pulseTimes) | |
<filename>pynetdicom/test/test_ae.py
#!/usr/bin/env python
import logging
import threading
import unittest
from unittest.mock import patch
from pydicom.uid import UID, ImplicitVRLittleEndian
from pynetdicom import AE
from pynetdicom import VerificationSOPClass, StorageSOPClassList, \
QueryRetrieveSOPClassList
# Silence pynetdicom's logging while the tests run.
logger = logging.getLogger('pynetdicom')
handler = logging.StreamHandler()
# NOTE(review): 'handler' is created but never attached via
# logger.addHandler() -- confirm whether that was intended.
logger.setLevel(logging.CRITICAL)
"""
Initialisation
--------------
AE(
ae_title='PYNETDICOM',
port=0,
scu_sop_class=[],
scp_sop_class=[],
transfer_syntax=[ExplicitVRLittleEndian,
ImplicitVRLittleEndian,
ExplicitVRBigEndian]
)
Functions
---------
AE.start()
AE.stop()
AE.quit()
AE.associate(addr, port)
Attributes
----------
acse_timeout - int
active_associations - list of pynetdicom.association.Association
address - str
ae_title - str
client_socket - socket.socket
dimse_timeout - int
network_timeout - int
maximum_associations - int
maximum_pdu_size - int
port - int
presentation_contexts_scu - List of pynetdicom.utils.PresentationContext
presentation_contexts_scp - List of pynetdicom.utils.PresentationContext
require_calling_aet - str
require_called_aet - str
scu_supported_sop - List of pydicom.uid.UID
scp_supported_sop - List of pydicom.uid.UID
transfer_syntaxes - List of pydicom.uid.UID
Callbacks
---------
on_c_echo()
on_c_store(dataset)
on_c_find(dataset)
on_c_find_cancel()
on_c_get(dataset)
on_c_get_cancel()
on_c_move(dataset)
on_c_move_cancel()
on_n_event_report()
on_n_get()
on_n_set()
on_n_action()
on_n_create()
on_n_delete()
on_receive_connection()
on_make_connection()
on_association_requested(primitive)
on_association_accepted(primitive)
on_association_rejected(primitive)
on_association_released(primitive)
on_association_aborted(primitive)
"""
class AEVerificationSCP(threading.Thread):
    """Daemon thread serving the Verification SOP class on port 11112.

    Constructing the object starts the SCP immediately; call stop() to
    shut it down.
    """

    def __init__(self):
        # Build the SCP before the thread starts, since run() dereferences it.
        self.ae = AE(port=11112, scp_sop_class=[VerificationSOPClass])
        super().__init__()
        self.daemon = True
        self.start()

    def run(self):
        # Thread body: blocks inside AE.start() until stop() is invoked.
        self.ae.start()

    def stop(self):
        self.ae.stop()
class AEStorageSCP(threading.Thread):
    """Daemon thread serving every Storage SOP class on port 11112.

    Constructing the object starts the SCP immediately; call stop() to
    shut it down.
    """

    def __init__(self):
        # Build the SCP before the thread starts, since run() dereferences it.
        self.ae = AE(port=11112, scp_sop_class=StorageSOPClassList)
        super().__init__()
        self.daemon = True
        self.start()

    def run(self):
        # Thread body: blocks inside AE.start() until stop() is invoked.
        self.ae.start()

    def stop(self):
        self.ae.stop()
class TestAEGoodCallbacks(unittest.TestCase):
    """Verify that AE user callbacks fire; most DIMSE-N checks are stubs."""

    def test_on_c_echo_called(self):
        """ Check that SCP AE.on_c_echo() was called """
        scp = AEVerificationSCP()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        # Patch the SCP-side callback so the incoming C-ECHO hits the mock.
        with patch.object(scp.ae, 'on_c_echo') as mock:
            assoc.send_c_echo()
            mock.assert_called_with()
        assoc.release()
        # AE.stop() exits via SystemExit; assertRaises doubles as shutdown.
        self.assertRaises(SystemExit, scp.stop)

    def test_on_c_store_called(self):
        """ Check that SCP AE.on_c_store(dataset) was called """
        scp = AEStorageSCP()
        ae = AE(scu_sop_class=StorageSOPClassList)
        assoc = ae.associate('localhost', 11112)
        # The store assertion is disabled pending a test dataset fixture.
        #with patch.object(scp.ae, 'on_c_store') as mock:
        #    assoc.send_c_store(dataset)
        #mock.assert_called_with()
        assoc.release()
        self.assertRaises(SystemExit, scp.stop)

    # The remaining callback tests are placeholders -- not implemented yet.
    def test_on_c_find_called(self): pass
    def test_on_c_get_called(self): pass
    def test_on_c_move_called(self): pass
    def test_on_n_event_report_called(self): pass
    def test_on_n_get_called(self): pass
    def test_on_n_set_called(self): pass
    def test_on_n_action_called(self): pass
    def test_on_n_create_called(self): pass
    def test_on_n_delete_called(self): pass
    def test_on_receive_connection_called(self): pass
    def test_on_make_connection_called(self): pass
    def test_on_association_req_called(self): pass
    def test_on_association_acc_called(self): pass
    def test_on_association_rej_called(self): pass
    def test_on_association_rel_called(self): pass
    def test_on_association_abort_called(self): pass
class TestAEGoodAssociation(unittest.TestCase):
    """Association establishment, PDU-size negotiation and timeout plumbing."""

    def test_associate_establish_release(self):
        """ Check SCU Association with SCP """
        scp = AEVerificationSCP()
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(assoc.is_established == True)
        assoc.release()
        self.assertTrue(assoc.is_established == False)
        # AE.stop() exits via SystemExit; assertRaises doubles as shutdown.
        self.assertRaises(SystemExit, scp.stop)

    def test_associate_max_pdu(self):
        """ Check Association has correct max PDUs on either end """
        scp = AEVerificationSCP()
        scp.ae.maximum_pdu_size = 54321
        ae = AE(scu_sop_class=[VerificationSOPClass])
        assoc = ae.associate('localhost', 11112, max_pdu=12345)
        # Each side should see its own local value and the peer's value.
        self.assertTrue(scp.ae.active_associations[0].local_max_pdu == 54321)
        self.assertTrue(scp.ae.active_associations[0].peer_max_pdu == 12345)
        self.assertTrue(assoc.local_max_pdu == 12345)
        self.assertTrue(assoc.peer_max_pdu == 54321)
        assoc.release()
        # Check 0 max pdu value
        assoc = ae.associate('localhost', 11112, max_pdu=0)
        self.assertTrue(assoc.local_max_pdu == 0)
        self.assertTrue(scp.ae.active_associations[0].peer_max_pdu == 0)
        assoc.release()
        self.assertRaises(SystemExit, scp.stop)

    def test_association_acse_timeout(self):
        """ Check that the Association timeouts are being set correctly """
        scp = AEVerificationSCP()
        scp.ae.acse_timeout = 0
        scp.ae.dimse_timeout = 0
        ae = AE(scu_sop_class=[VerificationSOPClass])
        ae.acse_timeout = 0
        ae.dimse_timeout = 0
        assoc = ae.associate('localhost', 11112)
        # AE-level timeouts must propagate into each new Association.
        self.assertTrue(scp.ae.active_associations[0].acse_timeout == 0)
        self.assertTrue(scp.ae.active_associations[0].dimse_timeout == 0)
        self.assertTrue(assoc.acse_timeout == 0)
        self.assertTrue(assoc.dimse_timeout == 0)
        assoc.release()
        scp.ae.acse_timeout = 21
        scp.ae.dimse_timeout = 22
        ae.acse_timeout = 31
        ae.dimse_timeout = 32
        assoc = ae.associate('localhost', 11112)
        self.assertTrue(scp.ae.active_associations[0].acse_timeout == 21)
        self.assertTrue(scp.ae.active_associations[0].dimse_timeout == 22)
        self.assertTrue(assoc.acse_timeout == 31)
        self.assertTrue(assoc.dimse_timeout == 32)
        assoc.release()
        self.assertRaises(SystemExit, scp.stop)
class TestAEGoodTimeoutSetters(unittest.TestCase):
    """Check the AE timeout setters coerce invalid values to sane defaults.

    Uses assertEqual instead of assertTrue(a == b) so a failure reports the
    actual and expected values rather than 'False is not true'.
    """

    def test_acse_timeout(self):
        """ Check AE ACSE timeout change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Invalid values (None, negative, non-numeric) fall back to 0.
        ae.acse_timeout = None
        self.assertEqual(ae.acse_timeout, 0)
        ae.acse_timeout = -100
        self.assertEqual(ae.acse_timeout, 0)
        ae.acse_timeout = 'a'
        self.assertEqual(ae.acse_timeout, 0)
        ae.acse_timeout = 0
        self.assertEqual(ae.acse_timeout, 0)
        ae.acse_timeout = 30
        self.assertEqual(ae.acse_timeout, 30)

    def test_dimse_timeout(self):
        """ Check AE DIMSE timeout change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Invalid values (None, negative, non-numeric) fall back to 0.
        ae.dimse_timeout = None
        self.assertEqual(ae.dimse_timeout, 0)
        ae.dimse_timeout = -100
        self.assertEqual(ae.dimse_timeout, 0)
        ae.dimse_timeout = 'a'
        self.assertEqual(ae.dimse_timeout, 0)
        ae.dimse_timeout = 0
        self.assertEqual(ae.dimse_timeout, 0)
        ae.dimse_timeout = 30
        self.assertEqual(ae.dimse_timeout, 30)

    def test_network_timeout(self):
        """ Check AE network timeout change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Invalid values fall back to the 60 s default (not 0).
        ae.network_timeout = None
        self.assertEqual(ae.network_timeout, 60)
        ae.network_timeout = -100
        self.assertEqual(ae.network_timeout, 60)
        ae.network_timeout = 'a'
        self.assertEqual(ae.network_timeout, 60)
        ae.network_timeout = 0
        self.assertEqual(ae.network_timeout, 0)
        ae.network_timeout = 30
        self.assertEqual(ae.network_timeout, 30)
class TestAEGoodMiscSetters(unittest.TestCase):
    """AE title / association-limit / PDU-size setters sanitise bad input.

    NOTE(review): the expected AE-title strings below depend on exact space
    padding (AE titles are space-padded/truncated); the padding in several
    fixtures looks inconsistent -- confirm against the original test file.
    """

    def test_ae_title_good(self):
        """ Check AE title change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        ae.ae_title = ' TEST '
        self.assertTrue(ae.ae_title == 'TEST ')
        ae.ae_title = ' TEST'
        self.assertTrue(ae.ae_title == 'TEST ')
        ae.ae_title = ' TEST'
        self.assertTrue(ae.ae_title == 'TEST ')
        ae.ae_title = 'a TEST'
        self.assertTrue(ae.ae_title == 'a TES')
        ae.ae_title = 'a TEST'
        self.assertTrue(ae.ae_title == 'a TEST ')

    def test_max_assoc_good(self):
        """ Check AE maximum association change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Invalid values (negative, list, string, zero) fall back to 1.
        ae.maximum_associations = -10
        self.assertTrue(ae.maximum_associations == 1)
        ae.maximum_associations = ['a']
        self.assertTrue(ae.maximum_associations == 1)
        ae.maximum_associations = '10'
        self.assertTrue(ae.maximum_associations == 1)
        ae.maximum_associations = 0
        self.assertTrue(ae.maximum_associations == 1)
        ae.maximum_associations = 5
        self.assertTrue(ae.maximum_associations == 5)

    def test_max_pdu_good(self):
        """ Check AE maximum pdu size change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Invalid values fall back to the 16382-byte default; 0 = unlimited.
        ae.maximum_pdu_size = -10
        self.assertTrue(ae.maximum_pdu_size == 16382)
        ae.maximum_pdu_size = ['a']
        self.assertTrue(ae.maximum_pdu_size == 16382)
        ae.maximum_pdu_size = '10'
        self.assertTrue(ae.maximum_pdu_size == 16382)
        ae.maximum_pdu_size = 0
        self.assertTrue(ae.maximum_pdu_size == 0)
        ae.maximum_pdu_size = 5000
        self.assertTrue(ae.maximum_pdu_size == 5000)

    def test_req_calling_aet(self):
        """ Check AE require calling aet change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Non-string values clear the requirement; strings are stripped.
        ae.require_calling_aet = -10
        self.assertTrue(ae.require_calling_aet == '')
        ae.require_calling_aet = ['a']
        self.assertTrue(ae.require_calling_aet == '')
        ae.require_calling_aet = '10'
        self.assertTrue(ae.require_calling_aet == '10')
        ae.require_calling_aet = ' TEST '
        self.assertTrue(ae.require_calling_aet == 'TEST')
        ae.require_calling_aet = ' TEST'
        self.assertTrue(ae.require_calling_aet == 'TEST')
        ae.require_calling_aet = ' TEST'
        self.assertTrue(ae.require_calling_aet == 'TEST')
        ae.require_calling_aet = 'a TEST'
        self.assertTrue(ae.require_calling_aet == 'a TES')

    def test_req_called_aet(self):
        """ Check AE require called aet change produces good value """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'])
        # Same sanitisation rules as require_calling_aet.
        ae.require_called_aet = -10
        self.assertTrue(ae.require_called_aet == '')
        ae.require_called_aet = ['a']
        self.assertTrue(ae.require_called_aet == '')
        ae.require_called_aet = '10'
        self.assertTrue(ae.require_called_aet == '10')
        ae.require_called_aet = ' TEST '
        self.assertTrue(ae.require_called_aet == 'TEST')
        ae.require_called_aet = ' TEST'
        self.assertTrue(ae.require_called_aet == 'TEST')
        ae.require_called_aet = ' TEST'
        self.assertTrue(ae.require_called_aet == 'TEST')
        ae.require_called_aet = 'a TEST'
        self.assertTrue(ae.require_called_aet == 'a TES')
class TestAEGoodInitialisation(unittest.TestCase):
    """Tests that AE initialisation with valid parameters succeeds."""

    def test_sop_classes_good_uid(self):
        """ Check AE initialisation produces valid supported SOP classes """
        # str/UID/class inputs are all normalised to UID instances;
        # assertEqual (not assertTrue(a == b)) so failures show the lists.
        ae = AE(scu_sop_class=['1.2.840.10008.1.1', '1.2.840.10008.1.1.1'])
        self.assertEqual(ae.scu_supported_sop,
                         [UID('1.2.840.10008.1.1'), UID('1.2.840.10008.1.1.1')])
        ae = AE(scu_sop_class=[UID('1.2.840.10008.1.1')])
        self.assertEqual(ae.scu_supported_sop, [UID('1.2.840.10008.1.1')])
        ae = AE(scu_sop_class=[VerificationSOPClass])
        self.assertEqual(ae.scu_supported_sop, [UID('1.2.840.10008.1.1')])
        # Invalid entries are dropped, keeping only the valid SOP class.
        ae = AE(scu_sop_class=[1, VerificationSOPClass, 3])
        self.assertEqual(ae.scu_supported_sop, [UID('1.2.840.10008.1.1')])
        # Same behaviour for the SCP SOP class list.
        ae = AE(scp_sop_class=['1.2.840.10008.1.1', '1.2.840.10008.1.1.1'])
        self.assertEqual(ae.scp_supported_sop,
                         [UID('1.2.840.10008.1.1'), UID('1.2.840.10008.1.1.1')])
        ae = AE(scp_sop_class=[UID('1.2.840.10008.1.1')])
        self.assertEqual(ae.scp_supported_sop, [UID('1.2.840.10008.1.1')])
        ae = AE(scp_sop_class=[VerificationSOPClass])
        self.assertEqual(ae.scp_supported_sop, [UID('1.2.840.10008.1.1')])
        ae = AE(scp_sop_class=[1, VerificationSOPClass, 3])
        self.assertEqual(ae.scp_supported_sop, [UID('1.2.840.10008.1.1')])

    def test_transfer_syntax_good_uid(self):
        """ Check AE initialisation produces valid transfer syntaxes """
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'],
                transfer_syntax=['1.2.840.10008.1.2'])
        self.assertEqual(ae.transfer_syntaxes, [UID('1.2.840.10008.1.2')])
        # Non-transfer-syntax UIDs in the list are dropped.
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'],
                transfer_syntax=['1.2.840.10008.1.2', '1.2.840.10008.1.1'])
        self.assertEqual(ae.transfer_syntaxes, [UID('1.2.840.10008.1.2')])
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'],
                transfer_syntax=['1.2.840.10008.1.2', '1.2.840.10008.1.2.2'])
        self.assertEqual(ae.transfer_syntaxes,
                         [UID('1.2.840.10008.1.2'), UID('1.2.840.10008.1.2.2')])
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'],
                transfer_syntax=[UID('1.2.840.10008.1.2')])
        self.assertEqual(ae.transfer_syntaxes, [UID('1.2.840.10008.1.2')])
        ae = AE(scu_sop_class=['1.2.840.10008.1.1'],
                transfer_syntax=[ImplicitVRLittleEndian])
        self.assertEqual(ae.transfer_syntaxes, [UID('1.2.840.10008.1.2')])
class TestAEBadInitialisation(unittest.TestCase):
def test_ae_title_all_spaces(self):
    """An AE title consisting only of spaces must be rejected."""
    with self.assertRaises(ValueError):
        AE('                ', 0, [VerificationSOPClass])
def test_ae_title_empty_str(self):
    """An empty-string AE title must be rejected."""
    with self.assertRaises(ValueError):
        AE('', 0, [VerificationSOPClass])
def test_ae_title_not_string(self):
    """A non-str AE title must be rejected with TypeError."""
    with self.assertRaises(TypeError):
        AE(55, 0, [VerificationSOPClass])
def test_ae_title_invalid_chars(self):
    """ AE should fail if ae_title contains invalid characters """
    # 'TEST\\ME' was previously written as 'TEST\ME' — an invalid escape
    # sequence (DeprecationWarning, future SyntaxError). The runtime value
    # (backslash + M) is unchanged. Control characters are also invalid.
    self.assertRaises(ValueError, AE, 'TEST\\ME', 0, [VerificationSOPClass])
    self.assertRaises(ValueError, AE, 'TEST\nME', 0, [VerificationSOPClass])
    self.assertRaises(ValueError, AE, 'TEST\rME', 0, [VerificationSOPClass])
    self.assertRaises(ValueError, AE, 'TEST\tME', 0, [VerificationSOPClass])
def test_port_not_numeric(self):
    """A non-numeric port must be rejected with TypeError."""
    with self.assertRaises(TypeError):
        AE('TESTSCU', 'a', [VerificationSOPClass])
def test_port_not_int(self):
    """A non-integer (float) port must be rejected with TypeError."""
    with self.assertRaises(TypeError):
        AE('TESTSCU', 100.8, [VerificationSOPClass])
def test_port_not_positive(self):
    """A negative port must be rejected (port must be >= 0)."""
    with self.assertRaises(ValueError):
        AE('TESTSCU', -1, [VerificationSOPClass])
def test_no_sop_classes(self):
    """AE construction must fail when neither SCU nor SCP SOP classes are given."""
    with self.assertRaises(ValueError):
        AE()
def test_sop_classes_not_list(self):
    """Non-list scu_sop_class / scp_sop_class arguments must be rejected."""
    with self.assertRaises(ValueError):
        AE('TEST', 0, VerificationSOPClass, [])
    with self.assertRaises(ValueError):
        AE('TEST', 0, [], VerificationSOPClass)
def test_sop_classes_not_list_of_sop_class(self):
    """Lists whose elements are not SOP classes must be rejected."""
    with self.assertRaises(ValueError):
        AE('TEST', 0, [1, 2, 'a'], [])
    with self.assertRaises(ValueError):
        AE('TEST', 0, [], [1, 'a', 3])
def test_sop_classes_bad_class(self):
""" AE should fail if given bad sop classes """
self.assertRaises(ValueError, AE, 'TEST', 0, ['1.2.840.10008.1.1.'], [])
self.assertRaises(ValueError, AE, 'TEST', 0, ['1.2.840.10008.1.1', 1, 'abst'], [])
self.assertRaises(ValueError, AE, 'TEST', 0, ['1.2.840.10008.1.1', '1.2.840.1.1.'], [])
self.assertRaises(ValueError, AE, 'TEST', 0, | |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and classes for dsub command-line parameters."""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import csv
import datetime
import os
import re
import sys
from . import dsub_util
from . import job_model
from .._dsub_version import DSUB_VERSION
from dateutil.tz import tzlocal
import pytz
import six
AUTO_PREFIX_INPUT = 'INPUT_' # Prefix for auto-generated input names
AUTO_PREFIX_OUTPUT = 'OUTPUT_' # Prefix for auto-generated output names
class ListParamAction(argparse.Action):
    """Append each value as a separate element to the parser destination.

    This class satisfies the action interface of argparse.ArgumentParser and
    refines the 'append' action for arguments with `nargs='*'`.

    For the parameters:
      --myarg val1 val2 --myarg val3
    the 'append' action yields:
      args.myval = ['val1 val2', 'val3']
    while ListParamAction yields:
      args.myval = ['val1', 'val2', 'val3']
    """

    def __init__(self, option_strings, dest, **kwargs):
        super(ListParamAction, self).__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # BUG FIX: argparse initialises an absent default to None (the
        # attribute exists, so getattr's fallback never fires); guard
        # against None before appending.
        params = getattr(namespace, self.dest, None) or []
        # Input comes in as a list (possibly len=1) of NAME=VALUE pairs.
        params.extend(values)
        setattr(namespace, self.dest, params)
class FileParamUtil(object):
"""Base class helper for producing FileParams from args or a tasks file.
InputFileParams and OutputFileParams can be produced from either arguments
passed on the command-line or as a combination of the definition in the tasks
file header plus cell values in task records.
This class encapsulates the generation of the FileParam name, if none is
specified (get_variable_name()) as well as common path validation for
input and output arguments (validate_paths).
"""
def __init__(self, auto_prefix, relative_path):
    """Set up auto-name generation and the docker-relative path prefix."""
    self._auto_prefix = auto_prefix      # prefix for generated variable names
    self._auto_index = 0                 # counter feeding get_variable_name()
    self._relative_path = relative_path  # leading directory for docker paths
    self.param_class = job_model.FileParam
def get_variable_name(self, name):
"""Produce a default variable name if none is specified."""
if not name:
name = '%s%s' % (self._auto_prefix, self._auto_index)
self._auto_index += 1
return name
def rewrite_uris(self, raw_uri, file_provider):
    """Normalize a raw URI and derive its in-container docker path.

    The normalized URI keeps the original location: cloud (GCS) URIs are
    returned unchanged, while local paths are expanded and absolutized by
    the provider-specific rewriter. The docker path is a sanitized
    relative path that begins with self._relative_path plus a
    provider-specific prefix (e.g. 'gs/' or 'file/'), and ends with a
    forward slash when it names a directory.

    Args:
      raw_uri: (str) the path component of the raw URI.
      file_provider: a valid provider (contained in job_model.FILE_PROVIDERS).

    Returns:
      A (normalized, docker_path) tuple.

    Raises:
      ValueError: if file_provider is not valid.
    """
    if file_provider == job_model.P_LOCAL:
        normalized, docker_path = self._local_uri_rewriter(raw_uri)
    elif file_provider == job_model.P_GCS:
        normalized, docker_path = _gcs_uri_rewriter(raw_uri)
    else:
        raise ValueError('File provider not supported: %r' % file_provider)
    return normalized, os.path.join(self._relative_path, docker_path)
@staticmethod
def _local_uri_rewriter(raw_uri):
    """Rewrite local file URIs as required by the rewrite_uris method.

    Local file paths, unlike GCS paths, may have their raw URI simplified
    by os.path.normpath which collapses extraneous indirect characters.

    >>> FileParamUtil._local_uri_rewriter('/tmp/a_path/../B_PATH/file.txt')
    ('/tmp/B_PATH/file.txt', 'file/tmp/B_PATH/file.txt')
    >>> FileParamUtil._local_uri_rewriter('/myhome/./mydir/')
    ('/myhome/mydir/', 'file/myhome/mydir/')

    The local path rewriter will also work to preserve relative paths even
    when creating the docker path. This prevents leaking of information on
    the invoker's system to the remote system. Doing this requires a number
    of path substitutions denoted with the _<rewrite>_ convention.

    >>> FileParamUtil._local_uri_rewriter('./../upper_dir/')[1]
    'file/_dotdot_/upper_dir/'
    >>> FileParamUtil._local_uri_rewriter('~/localdata/*.bam')[1]
    'file/_home_/localdata/*.bam'

    Args:
      raw_uri: (str) the raw file or directory path.

    Returns:
      normalized: a simplified and/or expanded version of the uri.
      docker_path: the uri rewritten in the format required for mounting
        inside a docker worker.
    """
    # The path is split into components so that the filename is not rewritten.
    raw_path, filename = os.path.split(raw_uri)
    # Generate the local path that can be resolved by filesystem operations:
    # remove special shell prefixes, condense indirects, and replace any
    # unnecessary scheme prefix. Only the FIRST matching prefix is applied.
    # NOTE(review): os.getenv('HOME') may be None when HOME is unset, which
    # would make os.path.join raise for '~/' inputs — confirm acceptable.
    prefix_replacements = [('file:///', '/'), ('~/', os.getenv('HOME')),
                           ('./', ''), ('file:/', '/')]
    normed_path = raw_path
    for prefix, replacement in prefix_replacements:
        if normed_path.startswith(prefix):
            normed_path = os.path.join(replacement, normed_path[len(prefix):])
    # Because abspath strips the trailing '/' from bare directory references
    # other than root, this ensures that all directory references end with '/'.
    normed_uri = directory_fmt(os.path.abspath(normed_path))
    normed_uri = os.path.join(normed_uri, filename)
    # Generate the path used inside the docker image;
    #  1) Get rid of extra indirects: /this/./that -> /this/that
    #  2) Rewrite required indirects as synthetic characters.
    #  3) Strip relative or absolute path leading character.
    #  4) Add 'file/' prefix.
    docker_rewrites = [(r'/\.\.', '/_dotdot_'), (r'^\.\.', '_dotdot_'),
                       (r'^~/', '_home_/'), (r'^file:/', '')]
    docker_path = os.path.normpath(raw_path)
    for pattern, replacement in docker_rewrites:
        docker_path = re.sub(pattern, replacement, docker_path)
    # lstrip('./') strips any leading RUN of '.' and '/' characters (a
    # character set, not the literal "./" prefix).
    docker_path = docker_path.lstrip('./')  # Strips any of '.' './' '/'.
    docker_path = directory_fmt('file/' + docker_path) + filename
    return normed_uri, docker_path
@staticmethod
def parse_file_provider(uri):
    """Map a URI's scheme prefix to its file provider constant."""
    # Scheme matcher capped at 30 characters: no IANA-registered scheme
    # is longer than that.
    scheme_match = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri)
    if scheme_match:
        prefix = scheme_match.group(1).lower()
    else:
        # No scheme in the URI: assume the local filesystem. Availability
        # and validity of the local file/directory are checked later.
        prefix = 'file'
    providers = {'gs': job_model.P_GCS, 'file': job_model.P_LOCAL}
    if prefix not in providers:
        raise ValueError('File prefix not supported: %s://' % prefix)
    return providers[prefix]
@staticmethod
def _validate_paths_or_fail(uri, recursive):
"""Do basic validation of the uri, return the path and filename."""
path, filename = os.path.split(uri)
# dsub could support character ranges ([0-9]) with some more work, but for
# now we assume that basic asterisk wildcards are sufficient. Reject any URI
# that includes square brackets or question marks, since we know that
# if they actually worked, it would be accidental.
if '[' in uri or ']' in uri:
raise ValueError(
'Square bracket (character ranges) are not supported: %s' % uri)
if '?' in uri:
raise ValueError('Question mark wildcards are not supported: %s' % uri)
# Only support file URIs and *filename* wildcards
# Wildcards at the directory level or "**" syntax would require better
# support from the Pipelines API *or* doing expansion here and
# (potentially) producing a series of FileParams, instead of one.
if '*' in path:
raise ValueError(
'Path wildcard (*) are only supported for files: %s' % uri)
if '**' in filename:
raise ValueError('Recursive wildcards ("**") not supported: %s' % uri)
if filename in ('..', '.'):
raise ValueError('Path characters ".." and "." not supported '
'for file names: %s' % uri)
# Do not allow non-recursive IO to reference directories.
if not recursive and not filename:
raise ValueError('Input or output values that are not recursive must '
'reference a filename or wildcard: %s' % uri)
def parse_uri(self, raw_uri, recursive):
"""Return a valid docker_path, uri, and file provider from a flag value."""
# Assume recursive URIs are directory paths.
if recursive:
raw_uri = directory_fmt(raw_uri)
# Get the file provider, validate the raw URI, and rewrite the path
# component of the URI for docker | |
#
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from keystone.common import sql
from keystone.common import provider_api
try: from oslo_utils import timeutils
except ImportError: from keystone.openstack.common import timeutils
from keystone import exception
from keystone.identity.backends.sql import Identity
try: from keystone.identity.backends.sql import User
except ImportError: from keystone.identity.backends.sql_model import User
from keystone_spassword.contrib.spassword import Driver
from keystone_spassword.contrib.spassword.mailer import SendMail
try: from oslo_log import log
except ImportError: from keystone.openstack.common import log
try: from oslo_config import cfg
except ImportError: from oslo.config import cfg
CONF = cfg.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
class SPasswordSecurityError(exception.Error):
    """Base class for spassword security errors."""

    def _build_message(self, message, **kwargs):
        """Prefer an explicit message; otherwise expand message_format."""
        text = message or self.message_format % kwargs
        return '%(message)s' % {'message': text}
class SPasswordUnauthorized(SPasswordSecurityError):
    """Authentication failure surfaced to the client as HTTP 401."""
    message_format = "The request you have made requires authentication."
    code = 401
    title = 'Unauthorized'
class SPasswordModel(sql.ModelBase, sql.ModelDictMixinWithExtras):
    """SQL model for per-user password state in the `spassword` table.

    Holds password-age data (for expiry checks), failed-login counters
    (for temporary blocking) and second-factor-authentication (sndfa)
    state, keyed by the keystone user id.
    """
    __tablename__ = 'spassword'
    attributes = ['user_id', 'user_name', 'domain_id', 'creation_time',
                  'login_attempts', 'last_login_attempt_time',
                  'sndfa', 'sndfa_last', 'sndfa_code', 'sndfa_time_code',
                  'sndfa_email', 'sndfa_email_code'
                  ]
    # Keystone user this row tracks (primary key).
    user_id = sql.Column(sql.String(64), primary_key=True)
    user_name = sql.Column(sql.String(255), default=None)
    domain_id = sql.Column(sql.String(64), default=None)
    # When the current password was set; compared against pwd_exp_days.
    creation_time = sql.Column(sql.DateTime(), default=None)
    # Consecutive failed logins; reset to 0 on successful authentication.
    login_attempts = sql.Column(sql.Integer, default=0)
    last_login_attempt_time = sql.Column(sql.DateTime(), default=None)
    # bad_attempts
    extra = sql.Column(sql.JsonBlob())
    # Second-factor authentication (sndfa) state:
    sndfa = sql.Column(sql.Boolean(), default=False)            # 2FA enabled
    sndfa_last = sql.Column(sql.DateTime(), default=None)       # last 2FA success
    sndfa_code = sql.Column(sql.String(32), default=None)       # pending 2FA code
    sndfa_time_code = sql.Column(sql.DateTime(), default=None)  # when code was issued
    sndfa_email = sql.Column(sql.Boolean(), default=False)      # email verified
    sndfa_email_code = sql.Column(sql.String(32), default=None)  # email-verify code
def get_user_session(user_id):
    """Fetch the User row for `user_id` along with the SQL session used.

    Tries the older `sql.get_session()` API first and falls back to the
    newer `sql.session_for_read()` context-manager API, mirroring the
    guarded imports at the top of this module.

    NOTE(review): in the fallback branch the returned session has already
    been exited by its `with` block; callers that reuse it (e.g. via
    `session.begin()`) may depend on it reopening — confirm.
    """
    try:
        session = sql.get_session()
        user_ref = session.query(User).get(user_id)  # TBD: what if backend is LDAP?
    except Exception:
        with sql.session_for_read() as session:
            user_ref = session.query(User).get(user_id)
    return user_ref, session
def get_spassword_session(user_id):
    """Fetch the SPasswordModel row for `user_id` along with its session.

    Same old/new keystone SQL API fallback as get_user_session(); the row
    is None when the user has no spassword record yet.

    NOTE(review): in the fallback branch the returned session has already
    been exited by its `with` block — see note on get_user_session().
    """
    try:
        session = sql.get_session()
        spassword_ref = session.query(SPasswordModel).get(user_id)
    except Exception:
        with sql.session_for_read() as session:
            spassword_ref = session.query(SPasswordModel).get(user_id)
    return spassword_ref, session
class SPassword(Driver):
    """Persistence driver for per-user spassword data.

    Tracks password creation time, failed login attempts and second-factor
    authentication (sndfa) state in the `spassword` table, keyed by the
    keystone user id. Methods generally tolerate a missing spassword row
    by logging a warning instead of raising.
    """

    def get_user(self, user_id):
        """Return the keystone User row, or raise UserNotFound."""
        user_ref, session = get_user_session(user_id)
        if not user_ref:
            raise exception.UserNotFound(user_id=user_id)
        return user_ref

    def remove_user(self, user_id):
        """Delete the user's spassword row, if it exists."""
        spassword_ref, session = get_spassword_session(user_id)
        LOG.info('removing user %s from spassword' % user_id)
        if spassword_ref:
            with session.begin():
                session.delete(spassword_ref)

    def set_user_creation_time(self, user):
        """Create (or refresh) the spassword row recording when the user's
        password was set; returns the persisted row as a dict."""
        spassword_ref, session = get_spassword_session(user['id'])
        LOG.debug('set user creation time for %s' % user['id'])
        if not spassword_ref:
            data_user = {}
            data_user['user_id'] = user['id']
            data_user['user_name'] = user['name']
            data_user['creation_time'] = datetime.datetime.utcnow()
            data_user['domain_id'] = user['domain_id']
            data_user['sndfa'] = False
            data_user['sndfa_last'] = None
            data_user['sndfa_code'] = None
            data_user['sndfa_time_code'] = None
            data_user['sndfa_email'] = False
            data_user['sndfa_email_code'] = None
            spassword_ref = SPasswordModel.from_dict(data_user)
        else:
            # TODO: Never reached?
            LOG.info('user %s already created in spassword, just updating' % user['id'])
            spassword_ref['creation_time'] = datetime.datetime.utcnow()
            spassword_ref['login_attempts'] = 0
        # A new session is needed
        with session.begin():
            session.add(spassword_ref)
        return spassword_ref.to_dict()

    def update_user_modification_time(self, user):
        """Reset password age and login attempts after a password change."""
        spassword_ref, session = get_spassword_session(user['id'])
        LOG.debug('update user modification time for %s' % user['id'])
        if spassword_ref:
            spassword_ref['creation_time'] = datetime.datetime.utcnow()
            spassword_ref['login_attempts'] = 0
        else:
            data_user = {}
            data_user['user_id'] = user['id']
            data_user['user_name'] = user['name']
            data_user['domain_id'] = user['domain_id']
            data_user['creation_time'] = datetime.datetime.utcnow()
            data_user['sndfa'] = False
            data_user['sndfa_last'] = None
            data_user['sndfa_code'] = None
            data_user['sndfa_time_code'] = None
            data_user['sndfa_email'] = False
            data_user['sndfa_email_code'] = None
            spassword_ref = SPasswordModel.from_dict(data_user)
            spassword_ref['login_attempts'] = 0
        with session.begin():
            session.add(spassword_ref)

    # Second Factor Auth methods
    #
    def set_user_sndfa_code(self, user, newcode):
        """Store a new sndfa code (with issue timestamp) for a user that
        has 2FA enabled and a verified email."""
        spassword_ref, session = get_spassword_session(user['id'])
        LOG.debug('set user sndfa code %s for user %s' % (newcode, user['id']))
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if 'sndfa' in spassword:
                if spassword['sndfa'] and spassword['sndfa_email']:
                    spassword_ref['sndfa_time_code'] = datetime.datetime.utcnow()
                    spassword_ref['sndfa_code'] = newcode
                    with session.begin():
                        session.add(spassword_ref)
                else:
                    LOG.warn('user %s still has not sndfa enabled or email verified' % user['id'])
            else:
                # NOTE(review): this fresh row carries no user_id primary
                # key, so it likely cannot be persisted as intended — confirm.
                data_user = {}
                data_user['sndfa'] = False
                data_user['sndfa_last'] = None
                data_user['sndfa_code'] = None
                data_user['sndfa_time_code'] = None
                data_user['sndfa_email'] = False
                data_user['sndfa_email_code'] = None
                spassword_ref = SPasswordModel.from_dict(data_user)
                with session.begin():
                    session.add(spassword_ref)
        else:
            LOG.warn('user %s still has not spassword data' % user['id'])

    def modify_sndfa(self, user_id, enable):
        """Enable/disable 2FA; only allowed once the email is verified.
        Returns True on success, False otherwise."""
        spassword_ref, session = get_spassword_session(user_id)
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if spassword['sndfa_email']:
                spassword_ref['sndfa'] = enable
                with session.begin():
                    session.add(spassword_ref)
                return True
            else:
                LOG.warn('user %s still has not sndfa enabled or email verified' % user_id)
        else:
            LOG.warn('user %s still has not spassword data' % user_id)
        return False

    def check_sndfa_code(self, user_id, code):
        """Check a submitted 2FA code; on match record the validation time.
        Returns True iff the code matches."""
        spassword_ref, session = get_spassword_session(user_id)
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if spassword['sndfa'] and spassword['sndfa_email']:
                checked = spassword['sndfa_code'] == code
                if checked:
                    spassword_ref['sndfa_last'] = datetime.datetime.utcnow()
                    with session.begin():
                        session.add(spassword_ref)
                return checked
            else:
                LOG.warn('user %s still has not sndfa enabled or email verified' % user_id)
        else:
            LOG.warn('user %s still has not spassword data' % user_id)
        return False

    def already_sndfa_signed(self, user):
        """Return True if the user's last 2FA validation is still current."""
        spassword_ref, session = get_spassword_session(user['id'])
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if spassword['sndfa'] and spassword['sndfa_email']:
                # NOTE(review): this compares against utcnow() PLUS the
                # window, whereas authenticate() uses utcnow() MINUS the
                # window for the analogous check — one of the two looks
                # inverted; confirm.
                if (spassword['sndfa_last'] and
                    spassword['sndfa_last'] < datetime.datetime.utcnow() + \
                    datetime.timedelta(hours=CONF.spassword.sndfa_time_window)):
                    LOG.debug('user %s sndfa verified' % user['id'])
                    return True
                else:
                    LOG.debug('user %s sndfa expired' % user['id'])
        else:
            LOG.warn('user %s still has not spassword data' % user['id'])
        return False

    def set_check_email_code(self, user_id, newcode):
        """Store a new email-verification code and mark the email unverified."""
        spassword_ref, session = get_spassword_session(user_id)
        if spassword_ref:
            spassword_ref['sndfa_email_code'] = newcode
            spassword_ref['sndfa_email'] = False
            with session.begin():
                session.add(spassword_ref)
        else:
            LOG.warn('user %s still has not spassword data' % user_id)

    def check_email_code(self, user_id, code):
        """Compare a submitted email-verification code and persist the
        result in sndfa_email. Returns True on match."""
        spassword_ref, session = get_spassword_session(user_id)
        check = False
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if spassword['sndfa_email_code']:
                LOG.debug('check email code user_id %s code %s sndfa_email_code %s ' % (user_id, code, spassword['sndfa_email_code']))
                check = spassword['sndfa_email_code'] == code
                spassword_ref['sndfa_email'] = check
                with session.begin():
                    session.add(spassword_ref)
        else:
            LOG.warn('user %s still has not spassword data' % user_id)
        return check

    def already_email_checked(self, user_id):
        """Return whether the user's email address has been verified."""
        spassword_ref, session = get_spassword_session(user_id)
        if spassword_ref:
            spassword = spassword_ref.to_dict()
            if 'sndfa_email' in spassword:
                return spassword['sndfa_email']
            else:
                LOG.warn('user %s still has not sndfa_email data' % user_id)
                # NOTE(review): as in set_user_sndfa_code, this replacement
                # row has no user_id primary key — confirm it can persist.
                data_user = {}
                data_user['sndfa'] = False
                data_user['sndfa_last'] = None
                data_user['sndfa_code'] = None
                data_user['sndfa_time_code'] = None
                data_user['sndfa_email'] = False
                data_user['sndfa_email_code'] = None
                spassword_ref = SPasswordModel.from_dict(data_user)
                with session.begin():
                    session.add(spassword_ref)
        else:
            LOG.warn('user %s still has not spassword data' % user_id)
        return False
class Identity(Identity, SendMail):
def _check_password(self, password, user_ref):
    """Reject expired passwords before delegating to the parent check.

    When spassword is enabled and the user is not blacklisted, raises
    SPasswordUnauthorized if the stored password creation time is older
    than CONF.spassword.pwd_exp_days; otherwise defers to the parent
    Identity._check_password and returns its boolean result.
    """
    if CONF.spassword.enabled:
        # Check if password has been expired
        spassword_ref, session = get_spassword_session(user_ref['id'])
        # Idiom fixes: `is not None` instead of `== None`; `not in`
        # instead of `not x in`; dead `res = False` before the raise removed.
        if (spassword_ref is not None and
                user_ref['id'] not in CONF.spassword.pwd_user_blacklist):
            # Check password time
            expiration_date = datetime.datetime.utcnow() - \
                datetime.timedelta(days=CONF.spassword.pwd_exp_days)
            spassword = spassword_ref.to_dict()
            if spassword['creation_time'] < expiration_date:
                LOG.warn('password of user %s %s expired ' % (user_ref['id'],
                                                              user_ref['name']))
                auth_error_msg = ('Password expired for user %s. Contact with your ' +
                                  'admin') % spassword['user_name']
                raise SPasswordUnauthorized(auth_error_msg)
    return super(Identity, self)._check_password(password, user_ref)
# Identity interface
def authenticate(self, user_id, password):
if CONF.spassword.enabled and \
not (user_id in CONF.spassword.pwd_user_blacklist):
spassword_ref, session = get_spassword_session(user_id)
if spassword_ref:
spassword = spassword_ref.to_dict()
if spassword['login_attempts'] > CONF.spassword.pwd_max_tries:
# Check last block attempt
if (spassword['last_login_attempt_time'] > \
datetime.datetime.utcnow() - \
datetime.timedelta(minutes=CONF.spassword.pwd_block_minutes)):
LOG.warn('max number of tries reach for login %s' % spassword['user_name'])
auth_error_msg = ('Password temporarily blocked for user %s due to reach' +
' max number of tries. Contact with your ' +
' admin') % spassword['user_name']
raise SPasswordUnauthorized(auth_error_msg)
try:
res = super(Identity, self).authenticate(user_id, password)
except AssertionError:
res = False
auth_error_msg = 'Invalid username or password'
if CONF.spassword.enabled:
spassword_ref, session = get_spassword_session(user_id)
current_attempt_time = datetime.datetime.utcnow()
if spassword_ref:
spassword = spassword_ref.to_dict()
if not res:
LOG.debug('wrong password provided at login %s' % spassword['user_name'])
spassword_ref['login_attempts'] += 1
else:
spassword_ref['login_attempts'] = 0
expiration_date = spassword_ref['creation_time'] + \
datetime.timedelta(days=CONF.spassword.pwd_exp_days)
res['extras'] = {
"password_creation_time": timeutils.isotime(spassword['creation_time']),
"password_expiration_time": timeutils.isotime(expiration_date),
"pwd_user_in_blacklist": user_id in CONF.spassword.pwd_user_blacklist,
"last_login_attempt_time": spassword['last_login_attempt_time']
}
# Update login attempt time
spassword_ref['last_login_attempt_time'] = current_attempt_time
# Check if sndfa_email in user
if res and CONF.spassword.sndfa and 'sndfa_email' in spassword:
# Put sndfa and sndfa_email info
res['extras']['sndfa_email'] = spassword['sndfa_email']
# Check if sndfa in user
if res and CONF.spassword.sndfa and 'sndfa' in spassword and spassword['sndfa']:
# Put sndfa and sndfa_email info
res['extras']['sndfa'] = spassword['sndfa']
if spassword['sndfa_email']:
if (spassword['sndfa_last'] and
spassword['sndfa_last'] > datetime.datetime.utcnow() - \
datetime.timedelta(hours=CONF.spassword.sndfa_time_window)):
LOG.debug('user %s was already validated with 2fa' % user_id)
else:
# Should retry code that was sent email
LOG.debug('user %s was not validated with 2fa due to code' % user_id)
if (spassword['sndfa_time_code'] and
spassword['sndfa_time_code'] > datetime.datetime.utcnow() - \
datetime.timedelta(hours=CONF.spassword.sndfa_time_window)):
code = spassword['sndfa_code']
else:
code = uuid.uuid4().hex[:6]
PROVIDERS.spassword_api.set_user_sndfa_code(self.get_user(user_id), code)
to = self.get_user(user_id)['email']
subject = 'IoT Platform second | |
<gh_stars>0
#!/usr/bin/env python3
import sys
import os
import re
import argparse
import pathlib
import gzip
import threading
from joblib import Parallel, delayed
# These are the effects we need to focus on.
# SnpEff-style variant functional-effect terms; presumably matched against
# the EFF/ANN annotations parsed from the VCF INFO field later in this
# script — confirm against the catalog-generation code.
EFFECTS = [
    'frameshift_variant',
    'exon_loss_variant',
    'duplication',
    'inversion',
    'feature_ablation',
    'gene_fusion',
    'rearranged_at_DNA_level',
    'missense_variant',
    'protein_protein_contact',
    'structural_interaction_variant',
    'rare_amino_acid_variant',
    'splice_acceptor_variant',
    'splice_donor_variant',
    'stop_lost',
    'start_lost',
    'stop_gained',
    'inframe_insertion',
    'disruptive_inframe_insertion',
    'inframe_deletion',
    'disruptive_inframe_deletion'
]
# Ordered (three-letter, one-letter) amino-acid code pairs. Applied
# sequentially in this exact order to reproduce the original chained
# substitutions.
_AA_THREE_TO_ONE = (
    ('Gly', 'G'), ('Pro', 'P'), ('Val', 'V'), ('Leu', 'L'), ('Met', 'M'),
    ('Cys', 'C'), ('Phe', 'F'), ('Tyr', 'Y'), ('Trp', 'W'), ('His', 'H'),
    ('Lys', 'K'), ('Arg', 'R'), ('Gln', 'Q'), ('Asp', 'D'), ('Ser', 'S'),
    ('Thr', 'T'), ('Asn', 'N'), ('Ile', 'I'), ('Glu', 'E'), ('Ala', 'A'),
)


def clean_amino_acid_change_string(amino_acid_change):
    """Convert three-letter amino-acid codes to one-letter codes and strip
    the HGVS protein-notation prefix 'p.'.

    All patterns are literal strings, so plain str.replace is used instead
    of 21 separate regex substitutions (same behavior, less machinery).
    """
    for three_letter, one_letter in _AA_THREE_TO_ONE:
        amino_acid_change = amino_acid_change.replace(three_letter, one_letter)
    # Drop the 'p.' prefix (the original regex 'p\.' matched it literally).
    return amino_acid_change.replace('p.', '')
# Generate reference dictionary
# This function is for joblib parallel
def generate_reference_dictionary(line):
    """Parse one tab-separated reference-metadata line into a one-entry
    {sample: metadata} dict (one dict per line; merged by the caller).

    Expected columns: Sample, Maturity_Group, Country, State,
    Improvement_Status, Classification.

    Note: the original `else` branch was unreachable (the dict is created
    fresh here, so the sample key can never pre-exist) and would have
    crashed on `int()` for non-numeric maturity groups; it has been removed.
    """
    values = line.strip("\n").split("\t")
    sample = values[0]
    maturity = values[1]
    # Keep the maturity group only when it parses as an int or a float
    # (split() always yields str, so the original isinstance checks on
    # int/float were dead); otherwise store an empty string.
    if not maturity.replace('.', '', 1).isdigit():
        maturity = ""
    return {
        sample: {
            "Sample": sample,
            "Maturity_Group": maturity,
            "Country": values[2],
            "State": values[3],
            "Improvement_Status": values[4],
            "Classification": values[5]
        }
    }
# Generate gff dictionary
# This function is for joblib parallel
def generate_gff_dictionary(line, gff_category, gff_key):
    """Parse one GFF line into {KEY.upper(): feature info}.

    Returns None for comment lines, scaffold features, lines that do not
    match gff_category/gff_key, or category mismatches; returns an empty
    dict when no attribute starting with gff_key is found.

    NOTE: gff_category and gff_key are used as regular-expression patterns
    in the quick pre-filter below (not plain substrings) — preserved for
    compatibility with existing callers.

    Note: the original `elif` branch was unreachable (the dict is created
    fresh here, so the key can never pre-exist); it has been removed.
    """
    if (line.startswith("#")) or (re.search(gff_category, line) is None) or \
            (re.search(gff_key, line) is None) or (re.search("scaffold", line) is not None):
        return None
    values = line.strip("\n").split("\t")
    if values[2] != gff_category:
        return None
    # Find the first attribute (column 9, ';'-separated) starting with the
    # key, e.g. "ID=Glyma.01G000100", and strip the key plus any leading
    # '=', ':' or '-' separators from it.
    key = ""
    for attribute in values[8].strip().split(";"):
        if str(attribute).startswith(gff_key):
            key = re.sub("(.*=)|(.*:)|(.*-)", "", re.sub(gff_key, "", str(attribute)))
            break
    gff_dictionary = {}
    if key != "":
        gff_dictionary[key.upper()] = {
            "Chromosome": values[0],
            "Category": values[2],
            "Start": values[3],
            "Stop": values[4],
            "Gene": key
        }
    return gff_dictionary
# Generate allele catalog
def generate_allele_catalog(header, line, reference_dictionary, gff_dictionary, output_file_path, lock):
    """Convert one VCF data line into allele-catalog rows appended to the output file.

    Runs under joblib's threading backend; `lock` serializes the file appends.

    Parameters
    ----------
    header : str
        The VCF "#CHROM" header line; columns 10 onward are accession names.
    line : str
        A single tab-separated VCF data line.
    reference_dictionary : dict
        Per-accession metadata (Classification, Improvement_Status, ...)
        keyed by accession name.
    gff_dictionary : dict
        Gene records keyed by upper-cased gene name, each holding
        Chromosome/Category/Start/Stop/Gene (see generate_gff_dictionary).
    output_file_path : str or pathlib.Path
        File rows are appended to (the header row is written by the caller).
    lock : threading.Lock
        Guards concurrent appends to the shared output file.
    """
    # Parse header and line to get variant
    accessions = str(header).strip().split("\t")[9:]
    line_array = str(line).strip().split("\t")
    chromosome = line_array[0]
    position = line_array[1]
    reference_allele = line_array[3]
    annotated_reference_allele = reference_allele + "|Ref"
    alternate_alleles = line_array[4].split(",")
    # Keep only the first allele of each sample call; both '/' (unphased) and
    # '|' (phased) genotype separators are handled.
    genotypes = [re.split('/|\\|', genotype)[0] for genotype in line_array[9:]]
    info_dict = {}
    info = line_array[7].split(";")
    # Extract the functional-annotation payload from INFO: EFF= (old snpEff
    # style) or ANN= (new style); whichever appears first wins.
    functional_effect_annotation_string = ""
    for i in range(len(info)):
        if info[i].startswith("EFF="):
            functional_effect_annotation_string = re.sub("EFF=", "", info[i])
            functional_effect_annotation_string = functional_effect_annotation_string.strip()
            break
        if info[i].startswith("ANN="):
            functional_effect_annotation_string = re.sub("ANN=", "", info[i])
            functional_effect_annotation_string = functional_effect_annotation_string.strip()
            break
    # Build info_dict[GENE][allele] -> {'Functional_Effects', 'Amino_Acid_Changes'}
    # from the comma-separated, '|'-delimited annotation entries.
    if functional_effect_annotation_string != "":
        functional_effect_annotation_string_array = functional_effect_annotation_string.split(",")
        for i in range(len(functional_effect_annotation_string_array)):
            single_functional_effect_annotation = re.split('\\|', str(functional_effect_annotation_string_array[i]))
            # Check if it is a primary transcript.
            # We only consider primary transcript.
            # NOTE(review): `pattern` is only ever used with str.find (plain
            # substring search, not regex), so this metacharacter escaping
            # appears unnecessary -- confirm gene names never need it.
            pattern = re.sub("\\\\", "\\\\\\\\", single_functional_effect_annotation[3])
            pattern = re.sub("\\+", "\\+", pattern)
            pattern = re.sub("\\*", "\\*", pattern)
            pattern = re.sub("\\?", "\\?", pattern)
            pattern = re.sub("\\(", "\\(", pattern)
            pattern = re.sub("\\)", "\\)", pattern)
            pattern = re.sub("\\[", "\\[", pattern)
            # The primary transcript is assumed to be named "<gene>.1" inside
            # annotation field 6 -- TODO confirm against the annotator used.
            if str(single_functional_effect_annotation[6]).find(str(pattern+".1")) != -1:
                # Field layout (assumed snpEff): 0 = allele, 1 = effect,
                # 3 = gene name, 10 = amino-acid change -- verify for the
                # annotator version in use.
                allele = str(single_functional_effect_annotation[0]).strip()
                gene = str(single_functional_effect_annotation[3]).strip()
                functional_effect = str(single_functional_effect_annotation[1]).strip()
                amino_acid_change = str(single_functional_effect_annotation[10]).strip()
                # EFFECTS is a module-level constant not visible here;
                # presumably the effect names of interest.
                # NOTE(review): both branches below build the same structure;
                # the only difference is that matching effects first pass the
                # amino-acid string through clean_amino_acid_change_string.
                if any([str(functional_effect).find(effect) != -1 for effect in EFFECTS]):
                    if amino_acid_change != "":
                        amino_acid_change = clean_amino_acid_change_string(amino_acid_change)
                    if gene.upper() not in info_dict.keys():
                        info_dict[gene.upper()] = {}
                    if allele not in info_dict[gene.upper()].keys():
                        info_dict[gene.upper()][allele] = {
                            'Functional_Effects': functional_effect,
                            'Amino_Acid_Changes': amino_acid_change
                        }
                    else:
                        # Accumulate multiple annotations for the same
                        # allele, '&'-joined.
                        if info_dict[gene.upper()][allele]['Functional_Effects'] == "":
                            info_dict[gene.upper()][allele]['Functional_Effects'] = functional_effect
                        else:
                            info_dict[gene.upper()][allele]['Functional_Effects'] = info_dict[gene.upper()][allele]['Functional_Effects']+'&'+functional_effect
                        if info_dict[gene.upper()][allele]['Amino_Acid_Changes'] == "":
                            info_dict[gene.upper()][allele]['Amino_Acid_Changes'] = amino_acid_change
                        else:
                            info_dict[gene.upper()][allele]['Amino_Acid_Changes'] = info_dict[gene.upper()][allele]['Amino_Acid_Changes']+'&'+amino_acid_change
                else:
                    # NOTE(review): unlike the branch above, this path never
                    # creates info_dict[gene.upper()] first, so a gene first
                    # seen via a non-EFFECTS annotation raises KeyError here
                    # (uncaught at this point) -- confirm intended.
                    if allele not in info_dict[gene.upper()].keys():
                        info_dict[gene.upper()][allele] = {
                            'Functional_Effects': functional_effect,
                            'Amino_Acid_Changes': amino_acid_change
                        }
                    else:
                        if info_dict[gene.upper()][allele]['Functional_Effects'] == "":
                            info_dict[gene.upper()][allele]['Functional_Effects'] = functional_effect
                        else:
                            info_dict[gene.upper()][allele]['Functional_Effects'] = info_dict[gene.upper()][allele]['Functional_Effects']+'&'+functional_effect
                        if info_dict[gene.upper()][allele]['Amino_Acid_Changes'] == "":
                            info_dict[gene.upper()][allele]['Amino_Acid_Changes'] = amino_acid_change
                        else:
                            info_dict[gene.upper()][allele]['Amino_Acid_Changes'] = info_dict[gene.upper()][allele]['Amino_Acid_Changes']+'&'+amino_acid_change
    # Collect the genes whose GFF span (Start..Stop, inclusive) contains this
    # variant's position on the same chromosome.
    genes = []
    for key in gff_dictionary.keys():
        if (gff_dictionary[key]["Chromosome"] == chromosome) and (float(gff_dictionary[key]["Start"].strip()) <= float(position.strip()) <= float(gff_dictionary[key]["Stop"].strip())):
            gene = gff_dictionary[key]["Gene"]
            if gene is not None:
                if gene != "":
                    genes.append(gene)
    # Collect all genotype information and write to the output file
    if ((len(genes) > 0) and (len(accessions) > 0) and (len(genotypes) > 0) and (len(accessions) == len(genotypes))):
        for i in range(len(genes)):
            for j in range(len(accessions)):
                try:
                    genotype = ""
                    genotype_with_description = ""
                    if genotypes[j] is not None:
                        if genotypes[j] != "":
                            # Genotype index 0 = reference allele; N > 0 picks
                            # the N-th comma-separated ALT allele.
                            genotype_index = int(genotypes[j])
                            if genotype_index == 0:
                                genotype = reference_allele
                                genotype_with_description = annotated_reference_allele
                            elif genotype_index > 0:
                                genotype = alternate_alleles[genotype_index-1]
                                # Annotate only when exactly one info_dict key
                                # contains this gene name.
                                # NOTE(review): the second condition below is
                                # trivially True (the key comes from
                                # info_dict); a KeyError from a missing
                                # genes[i].upper() key is swallowed by the
                                # except below -- confirm.
                                flags = [str(info_key).find(genes[i].upper()) != -1 for info_key in info_dict.keys()]
                                flags = [flag_index for flag_index, flag in enumerate(flags) if flag]
                                if (len(flags) == 1) and (list(info_dict.keys())[flags[0]] in info_dict.keys()):
                                    if genotype in info_dict[genes[i].upper()].keys():
                                        if info_dict[genes[i].upper()][genotype]['Functional_Effects'] != "":
                                            genotype_with_description = genotype+"|"+info_dict[genes[i].upper()][genotype]['Functional_Effects']
                                            if info_dict[genes[i].upper()][genotype]['Amino_Acid_Changes'] != "":
                                                genotype_with_description = genotype_with_description+"|"+info_dict[genes[i].upper()][genotype]['Amino_Acid_Changes']
                                        else:
                                            genotype_with_description = genotype+"|Alt"
                                    else:
                                        genotype_with_description = genotype+"|Alt"
                                else:
                                    genotype_with_description = genotype+"|Alt"
                    # Write to the output file
                    # The lock serializes appends from concurrent threads;
                    # symbolic structural alleles (<INS>/<DEL>) are skipped.
                    lock.acquire()
                    with open(output_file_path, "a") as writer:
                        if (accessions[j] in reference_dictionary.keys()) and (genes[i] != "") and (genotype != "") and (genotype_with_description != "") and (genotype != "<INS>") and (genotype != "<DEL>"):
                            writer.write(
                                reference_dictionary[accessions[j]]["Classification"] + "\t" +
                                reference_dictionary[accessions[j]]["Improvement_Status"] + "\t" +
                                reference_dictionary[accessions[j]]["Maturity_Group"] + "\t" +
                                reference_dictionary[accessions[j]]["Country"] + "\t" +
                                reference_dictionary[accessions[j]]["State"] + "\t" +
                                accessions[j] + "\t" +
                                chromosome + "\t" +
                                genes[i] + "\t" +
                                position + "\t" +
                                genotype + "\t" +
                                genotype_with_description + "\n"
                            )
                        elif (accessions[j] not in reference_dictionary.keys()) and (genes[i] != "") and (genotype != "") and (genotype_with_description != "") and (genotype != "<INS>") and (genotype != "<DEL>"):
                            # Accession without reference metadata: emit
                            # empty metadata columns.
                            writer.write(
                                "" + "\t" +
                                "" + "\t" +
                                "" + "\t" +
                                "" + "\t" +
                                "" + "\t" +
                                accessions[j] + "\t" +
                                chromosome + "\t" +
                                genes[i] + "\t" +
                                position + "\t" +
                                genotype + "\t" +
                                genotype_with_description + "\n"
                            )
                    lock.release()
                except Exception as e:
                    # Best-effort: log and continue with the next accession.
                    # NOTE(review): if an exception fires between acquire()
                    # and release(), the lock is never released -- consider
                    # `with lock:` instead.
                    print(e)
def main(args):
    """Generate the allele catalog from a VCF, a GFF, and an optional reference table.

    Workflow:
      1. Ensure the output directory exists (exit(1) if it cannot be created).
      2. Build the per-sample metadata dictionary from the reference file.
      3. Build the gene dictionary from the GFF file.
      4. Write the output header, then stream the VCF (plain or gzipped) and
         append one catalog row per accession/gene/variant in parallel.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments (see the parser at the bottom of the file).
    """
    #######################################################################
    # Get arguments
    #######################################################################
    input_file_path = args.input_file
    reference_file_path = args.reference_file
    gff_file_path = args.gff_file
    output_file_path = args.output_file
    n_jobs = args.threads
    gff_category = args.gff_category
    gff_key = args.gff_key
    #######################################################################
    # Create a threading lock that serializes appends to the output file
    #######################################################################
    lock = threading.Lock()
    #######################################################################
    # Check if output parent folder exists; if not, create it.
    # Creation is best-effort (another process may race us); exit with
    # status 1 only if the folder still does not exist afterwards.
    #######################################################################
    if not output_file_path.parent.exists():
        try:
            output_file_path.parent.mkdir(parents=True, exist_ok=True)
        except OSError:
            pass
        if not output_file_path.parent.exists():
            sys.exit(1)
    #######################################################################
    # Read reference file then make a reference dictionary
    #######################################################################
    reference_dictionary = {}
    if reference_file_path is not None:
        with open(reference_file_path, "r") as reader:
            reader.readline()  # skip the header row
            # Each worker returns a single-sample dictionary; merge them,
            # keeping the first occurrence of each sample.
            results = Parallel(n_jobs=n_jobs)(
                delayed(generate_reference_dictionary)(line) for line in reader
            )
        for result in results:
            for key, value in result.items():
                reference_dictionary.setdefault(key, value)
    #######################################################################
    # Read gff file then make a gff dictionary
    #######################################################################
    with open(gff_file_path, "r") as reader:
        results = Parallel(n_jobs=n_jobs)(
            delayed(generate_gff_dictionary)(line.strip(), gff_category, gff_key) for line in reader
        )
    gff_dictionary = {}
    for result in results:
        if result is not None:
            for key, value in result.items():
                gff_dictionary.setdefault(key, value)
    #######################################################################
    # Write header to the output file (truncates any previous run's output;
    # worker threads append below)
    #######################################################################
    with open(output_file_path, "w") as writer:
        writer.write(
            "Classification\tImprovement_Status\tMaturity_Group\tCountry\tState\t"
            "Accession\tChromosome\tGene\tPosition\tGenotype\tGenotype_with_Description\n"
        )
    #######################################################################
    # Read input file then generate allele catalog.
    # gzip.open and open both accept text mode 'rt', so a single code path
    # handles plain and gzipped VCFs.
    #######################################################################
    opener = gzip.open if str(input_file_path).endswith('gz') else open
    with opener(input_file_path, 'rt') as reader:
        # Skip to the "#CHROM" column-header line; exit if the file ends
        # without one (previously this looped forever on a malformed VCF,
        # because readline() returns "" at EOF).
        header = ""
        while not header.strip().startswith("#CHROM"):
            header = reader.readline()
            if header == "":
                sys.exit(1)
        Parallel(n_jobs=n_jobs, backend="threading")(
            delayed(generate_allele_catalog)(header, line, reference_dictionary, gff_dictionary, output_file_path, lock)
            for line in reader
        )
if __name__ == "__main__":
#######################################################################
# Parse arguments
#######################################################################
parser = argparse.ArgumentParser(prog='generate_Allele_Catalog', description='generate Allele Catalog')
parser.add_argument('-i', '--input_file', help='Input file', type=pathlib.Path, required=True)
parser.add_argument('-g', '--gff_file', help='GFF file', type=pathlib.Path, required=True)
parser.add_argument('-o', '--output_file', help='Output file', type=pathlib.Path, required=True)
parser.add_argument('-r', '--reference_file', help='Reference file', type=pathlib.Path, default=None)
parser.add_argument('-t', '--threads', help='Number of threads', type=int, default=10)
parser.add_argument('-c', '--gff_category', help='Gff category', type=str, default='gene')
parser.add_argument('-k', '--gff_key', help='Gff key', type=str, default='Name')
args = parser.parse_args()
#######################################################################
# Call main function
#######################################################################
| |
showarrow=False))
return annotations
def get_perpendicular_bar(self, bar_vals, i, layout):
if isinstance(self.perpendicular_bar_func, str):
val = pd.Series(bar_vals).agg(self.perpendicular_bar_func)
else:
values = self.df_values.iloc[i]
ranks = self.df_ranks.iloc[i]
val = self.perpendicular_bar_func(values, ranks)
xref, yref = ("x", "paper") if self.orientation == 'h' else ("paper", "y")
value_limit = self.xlimit if self.orientation == 'h' else self.ylimit
if self.fixed_max:
delta = (value_limit[1] - value_limit[0]) * .02
else:
delta = (1.05 * bar_vals.max() - bar_vals.min()) * .02
x0, x1 = (val - delta, val + delta) if self.orientation == 'h' else (0, 1)
y0, y1 = (val - delta, val + delta) if self.orientation == 'v' else (0, 1)
return dict(type="rect", xref=xref, yref=yref, x0=x0, y0=y0, x1=x1, y1=y1,
fillcolor="#444444",layer="below", opacity=.5, line_width=0)
    def make_animation(self):
        """Assemble the animated plotly figure: seed data/layout from the
        first frame, attach Play/Pause buttons and (optionally) a slider,
        then either write the figure to HTML (when ``self.filename`` is set;
        returns None) or return the ``go.Figure``.
        """
        frames, slider_steps = self.get_frames()
        # The first frame seeds the figure's initial data and layout; the
        # remaining frames drive the animation (see go.Figure call below).
        data = frames[0].data
        layout = frames[0].layout
        layout.title = self.title
        # Play/Pause buttons anchored just above the top-right plot corner.
        layout.updatemenus = [dict(
            type="buttons",
            direction = "left",
            x=1,
            y=1.02,
            xanchor='right',
            yanchor='bottom',
            buttons=[dict(label="Play",
                          method="animate",
                          # redraw must be true for bar plots
                          args=[None, {"frame": {"duration": self.duration, "redraw": True},
                                        "fromcurrent": True
                                        }]),
                     dict(label="Pause",
                          method="animate",
                          args=[[None], {"frame": {"duration": 0, "redraw": False},
                                         "mode": "immediate",
                                         "transition": {"duration": 0}}]),
            ]
        )]
        # Slider mirroring the animation steps along the bottom of the figure.
        sliders_dict = {
            "active": 0,
            "yanchor": "top",
            "xanchor": "left",
            "currentvalue": {
                # "font": {"size": 20},
                # "prefix": '', # allow user to set
                "visible": False, # just repeats period label
                # "xanchor": "right"
            },
            "transition": {"duration": self.duration, "easing": "cubic-in-out"},
            "pad": {"b": 10, "t": 50},
            "len": 0.88,
            "x": 0.05,
            "y": 0,
            "steps": slider_steps
        }
        if self.slider:
            layout.sliders = [sliders_dict]
        # frames[0] already seeded data/layout, so only the remainder is
        # passed as animation frames.
        fig = go.Figure(data=data, layout=layout, frames=frames[1:])
        if self.filename:
            # Save to disk; write_html_kwargs are forwarded verbatim and
            # nothing is returned in this branch.
            fig.write_html(self.filename, **self.write_html_kwargs)
        else:
            return fig
def bar_chart_race_plotly(df, filename=None, orientation='h', sort='desc', n_bars=None,
fixed_order=False, fixed_max=False, steps_per_period=10,
period_length=500, end_period_pause=0, interpolate_period=False,
period_label=True, period_template=None, period_summary_func=None,
perpendicular_bar_func=None, colors=None, title=None, bar_size=.95,
bar_textposition='outside', bar_texttemplate=None, bar_label_font=None,
tick_label_font=None, hovertemplate=None, slider=True, scale='linear',
bar_kwargs=None, layout_kwargs=None, write_html_kwargs=None,
filter_column_colors=False):
'''
Create an animated bar chart race using Plotly. Data must be in
'wide' format where each row represents a single time period and each
column represents a distinct category. Optionally, the index can label
the time period. Bar length and location change linearly from one time
period to the next.
Note - The duration of each frame is calculated as
`period_length` / `steps_per_period`, but is unlikely to actually
be this number, especially when duration is low (< 50ms). You may have to
experiment with different combinations of `period_length` and
`steps_per_period` to get the animation at the desired speed.
If no `filename` is given, a plotly figure is returned that is embedded
into the notebook.
Parameters
----------
df : pandas DataFrame
Must be a 'wide' DataFrame where each row represents a single period
of time. Each column contains the values of the bars for that
category. Optionally, use the index to label each time period.
The index can be of any type.
filename : `None` or str, default None
If `None` return plotly animation, otherwise save
to disk. Can only save as HTML at this time.
orientation : 'h' or 'v', default 'h'
Bar orientation - horizontal or vertical
sort : 'desc' or 'asc', default 'desc'
Choose how to sort the bars. Use 'desc' to put largest bars on top
and 'asc' to place largest bars on bottom.
n_bars : int, default None
Choose the maximum number of bars to display on the graph.
By default, use all bars. New bars entering the race will appear
from the edge of the axes.
fixed_order : bool or list, default False
When `False`, bar order changes every time period to correspond
with `sort`. When `True`, bars remained fixed according to their
final value corresponding with `sort`. Otherwise, provide a list
of the exact order of the categories for the entire duration.
fixed_max : bool, default False
Whether to fix the maximum value of the axis containing the values.
When `False`, the axis for the values will have its maximum (x/y)
just after the largest bar of the current time period.
The axis maximum will change along with the data.
When True, the maximum axis value will remain constant for the
duration of the animation. For example, in a horizontal bar chart,
if the largest bar has a value of 100 for the first time period and
10,000 for the last time period. The xlim maximum will be 10,000
for each frame.
steps_per_period : int, default 10
The number of steps to go from one time period to the next.
The bars will grow linearly between each period.
period_length : int, default 500
Number of milliseconds to animate each period (row).
Default is 500ms (half of a second)
end_period_pause : int, default 0
Number of milliseconds to pause the animation at the end of
each period.
interpolate_period : bool, default `False`
Whether to interpolate the period. Only valid for datetime or
numeric indexes. When set to `True`, for example,
the two consecutive periods 2020-03-29 and 2020-03-30 with
`steps_per_period` set to 4 would yield a new index of
2020-03-29 00:00:00
2020-03-29 06:00:00
2020-03-29 12:00:00
2020-03-29 18:00:00
2020-03-30 00:00:00
period_label : bool or dict, default `True`
If `True` or dict, use the index as a large text label
on the figure labeling each period. No label when 'False'.
Use a dictionary to supply the exact position of the period
along with any valid parameters of a plotly annotation.
Example:
{
'x': .99,
'y': .8,
'font' : {'family': 'Helvetica', 'size': 20, 'color': 'orange'},
'xanchor': 'right',
}
Reference - https://plotly.com/python/reference/#layout-annotations
The default location depends on `orientation` and `sort`
* h, desc -> x=.95, y=.15
* h, asc -> x=.95, y=.85
* v, desc -> x=.95, y=.85
* v, asc -> x=.05, y=.85
period_template : str, default `None`
Either a string with date directives or
a new-style (Python 3.6+) formatted string
For a string with a date directive, find the complete list here
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
Example of string with date directives
'%B %d, %Y'
Will change 2020/03/29 to March 29, 2020
For new-style formatted string. Use curly braces and the variable `x`,
which will be passed the current period's index value.
Example:
'Period {x:10.2f}'
Date directives will only be used for datetime indexes.
period_summary_func : function, default None
Custom text added to the axes each period.
Create a user-defined function that accepts two pandas Series of the
current time period's values and ranks. It must return a dictionary
containing at a minimum the keys "x", "y", and "text" which will be
passed used for a plotly annotation.
Example:
def func(values, ranks):
total = values.sum()
text = f'Worldwide deaths: {total}'
return {'x': .85, 'y': .2, 'text': text, 'size': 11}
perpendicular_bar_func : function or str, default None
Creates a single bar perpendicular to the main bars that spans the
length of the axis.
Use either a string that the DataFrame `agg` method understands or a
user-defined function.
DataFrame strings - 'mean', 'median', 'max', 'min', etc..
The function is passed two pandas Series of the current time period's
data and ranks. It must return a single value.
def func(values, ranks):
return values.quantile(.75)
colors : str or sequence colors, default 'dark12'
Colors to be used for the bars. All matplotlib and plotly colormaps are
available by string name. Colors will repeat if there are more bars than colors.
'dark12' is the default colormap. If there are more than 10 columns,
then the default colormap will be 'dark24'
Append "_r" to the colormap name to use the reverse of the colormap.
i.e. "dark12_r"
title : str, dict, or plotly.graph_objects.layout.Title , default None
Title of animation. Use a string for simple titles or a
dictionary to specify several properties
{'text': 'My Bar Chart Race',
'x':0.5,
'y':.9,
'xanchor': 'center',
'yanchor': 'bottom'}
Other properties include: font, pad, xref, yref
| |
z in currentConcensusFeature:
try:
currentConcensusFeatureFilled.append( float (z) )
except ValueError:
currentConcensusFeatureFilled.append( float(0) )
if HeavyResidue == Missing_cleavages + 1 or HeavyResidue == 0:
#if info_in_line[8] == "1":
if charge_state not in charge_states_seen:
for c in range(numConditions-1):
quanArrayN[c].append( [ [massPerCharge, charge_state, averageRT, PSMs] , currentConcensusFeatureFilled[0:3] ] )
quanArrayR[c].append( [ [massPerCharge, charge_state, averageRT, PSMs] , currentConcensusFeatureFilled[3*c+3:3*c+6] ] )
charge_states_seen.append(charge_state)
else:
for c in range(numConditions-1):
for j in range(len(quanArrayN[c])):
if quanArrayN[c][j][0][1] == charge_state:
quanArrayN[c][j][1][0] = quanArrayN[c][j][1][0] + currentConcensusFeatureFilled[0]
quanArrayN[c][j][1][1] = quanArrayN[c][j][1][1] + currentConcensusFeatureFilled[1]
quanArrayN[c][j][1][2] = quanArrayN[c][j][1][2] + currentConcensusFeatureFilled[2]
quanArrayN[c][j][0][3] += PSMs
for j in range(len(quanArrayR[c])):
if quanArrayR[c][j][0][1] == charge_state:
quanArrayR[c][j][1][0] = quanArrayR[c][j][1][0] + currentConcensusFeatureFilled[3+3*c]
quanArrayR[c][j][1][1] = quanArrayR[c][j][1][1] + currentConcensusFeatureFilled[4+3*c]
quanArrayR[c][j][1][2] = quanArrayR[c][j][1][2] + currentConcensusFeatureFilled[5+3*c]
quanArrayR[c][j][0][3] += PSMs
if Pep_Seq and quanArrayR and Pep_Seq_thisLine != Pep_Seq:
#what to keep and what to reject (on the basis of missing data)
for c in range(numConditions-1):
for j in range(len(quanArrayN[c])):
if quanArrayN[c][j][1].count( 0 ) == 0 and quanArrayR[c][j][1].count( 0 ) == 0:
quanArrayNfilt[c].append( quanArrayN[c][j] )
quanArrayRfilt[c].append( quanArrayR[c][j] )
elif quanArrayN[c][j][1].count( 0 ) == 3 and quanArrayR[c][j][1].count( 0 ) == 0: #Missing data being used to impute low value for Ns
quanArrayNfilt[c].append( [quanArrayN[c][j][0], [1000,1000,1000]] )
quanArrayRfilt[c].append( quanArrayR[c][j] )
elif quanArrayN[c][j][1].count( 0 ) == 0 and quanArrayR[c][j][1].count( 0 ) == 3: #Missing data being used to impute low value for Rs
quanArrayNfilt[c].append( quanArrayN[c][j] )
quanArrayRfilt[c].append( [quanArrayR[c][j][0], [1000,1000,1000]] )
elif (quanArrayN[c][j][1].count( 0 ) + quanArrayR[c][j][1].count( 0 )) == 1:
if quanArrayN[c][j][1].count( 0 ) == 1:
quanArrayN[c][j][1].remove( 0 )
quanArrayNfilt[c].append( quanArrayN[c][j] )
quanArrayRfilt[c].append( quanArrayR[c][j] )
if quanArrayR[c][j][1].count( 0 ) == 1:
quanArrayR[c][j][1].remove( 0 )
quanArrayNfilt[c].append( quanArrayN[c][j] )
quanArrayRfilt[c].append( quanArrayR[c][j] )
else:
pass
PeptideRatioByCondition = []
VariationByCondition = []
normalizedPeptideRatioByCondition = []
PeptidePValueByCondition = []
for c in range(numConditions-1):
if quanArrayRfilt[c]:
ratioArray = [ np.log2(np.mean( quanArrayRfilt[c][i][1] )/np.mean( quanArrayNfilt[c][i][1] )) for i in range(len(quanArrayRfilt[c])) ]
variationArray = [ variation(quanArrayRfilt[c][i][1] ) for i in range(len(quanArrayRfilt[c])) ]
pvalueArray = [ ttest_ind( quanArrayRfilt[c][i][1], quanArrayNfilt[c][i][1] , equal_var=False )[1] for i in range(len(quanArrayRfilt[c])) ]
teststatArray = [ ttest_ind( quanArrayRfilt[c][i][1], quanArrayNfilt[c][i][1] , equal_var=False )[0] for i in range(len(quanArrayRfilt[c])) ]
PeptideRatioByCondition.append( np.median( ratioArray ) )#use the median from all the concensus features for this peptide group
VariationByCondition.append( np.min( variationArray ) )
if ProteinPValue > 2 and np.abs(ProteinRatio) > 1: #Is the ProteinRatio Significant???
normalizedPeptideRatioByCondition.append( PeptideRatioByCondition[c] - ProteinRatio )
else:
normalizedPeptideRatioByCondition.append( PeptideRatioByCondition[c] )
if len(pvalueArray) == 1:
PeptidePValueByCondition.append( np.abs(np.log10( pvalueArray[0] )) )
else:
if all(z > 0 for z in teststatArray) or all(z < 0 for z in teststatArray): #Fisher's Method
ChiSquareTeststat = 2*np.abs( np.sum( np.log(pvalueArray)* np.sign(teststatArray) ) )
PeptidePValueByCondition.append( np.abs( np.log10 ( 1 - chi2.cdf(ChiSquareTeststat,len(ratioArray)*2) )) )
else: #if the different consensus features are inconsistent with each other
PeptidePValueByCondition.append( 0 )
else:
PeptideRatioByCondition.append( 0 )
VariationByCondition.append( 0 )
normalizedPeptideRatioByCondition.append( 0 )
PeptidePValueByCondition.append( 0 )
Peptides_table.append( [Accession, ProteinGeneName, Pep_Seq, proteinaseKsite, ProteinLocation, ProteinComposition, ProteinCofactors, Essential, CopyNumber, ProteinMW, fg, numsubunits, uniquesubunits, pI, ProteinFolds, ProteinDomainRanges, ProteinNumDomains, ProteinNumUniqueDomains, DomainLocation, PositionInDomain, PeptideRatioByCondition, normalizedPeptideRatioByCondition, PeptidePValueByCondition, VariationByCondition] )
quanArrayR = [[] for i in range(numConditions-1)]
quanArrayN = [[] for i in range(numConditions-1)]
quanArrayRfilt = [[] for i in range(numConditions-1)]
quanArrayNfilt = [[] for i in range(numConditions-1)]
charge_states_seen = []
Pep_Seq = ''
#peptideLip_output_file.write('Number of proteins with half-tryptic petides:\t%s' %str(len(Proteins_with_halftryptics)))
for i in range(len(Peptides_table)):
if np.count_nonzero( Peptides_table[i][22] ) != 0:
line_to_write = '\t'.join(str(s) for s in Peptides_table[i][0:20]) + '\t' + '\t'.join(str(s) for s in Peptides_table[i][20][:]) + '\t' + '\t'.join(str(s) for s in Peptides_table[i][21][:]) + '\t' + '\t'.join(str(s) for s in Peptides_table[i][22][:]) + '\t' + '\t'.join(str(s) for s in Peptides_table[i][23][:])+ '\n'
peptideLip_output_file.write( line_to_write )
peptideLip_file.close()
peptideLip_output_file.close()
peptideLip_output_file = open( peptideLip_output_fn, 'r' )
protein_summary_fn = peptideLip_fn.split('.')[0] + '_summary18Protein.txt'
protein_summary_file = open( protein_summary_fn, 'w')
GeneName = ''
Pvalues = [[] for c in range(numConditions-1)]
Ratios = [[] for c in range(numConditions-1)]
next(peptideLip_output_file)
protein_summary_file.write( 'Accession\tGeneName\tLocation\tComposition\tCofactors\tEssential?\tCopy Number\tMolecular Weight\tAmount (fg)\tNum Subunits\tUnique Subunits\tpI\tFolds\tDomain Ranges\tNum Domains\tUnique Domains\t' + '\t'.join('SigPeptides'+str(s)+'\t'+'TotPeptides'+str(s)+'\t'+'AonPeptides'+str(s)+'\t'+'AllSigPeptides'+str(s) for s in range(1,numConditions)) + '\n' )
for line in peptideLip_output_file:
info_in_line = line.split('\t')
if info_in_line[1] == GeneName: #we're on the same gene
for c in range(numConditions-1):
Ratios[c].append( float(info_in_line[19 + numConditions + c] ) )
Pvalues[c].append( float(info_in_line[18 + 2*numConditions + c] ) )
else: #we've started looking at a new gene
if GeneName == '': #We've just started; this was the first gene
Accession = info_in_line[0]
GeneName = info_in_line[1]
Location = info_in_line[4]
Composition = info_in_line[5]
Cofactors = info_in_line[6]
Essential = info_in_line[7]
CopyNumber = info_in_line[8]
ProteinMW = info_in_line[9]
fg = info_in_line[10]
numsubunits = info_in_line[11]
uniquesubunits = info_in_line[12]
pI = info_in_line[13]
ProteinFolds = info_in_line[14]
ProteinDomainRanges = info_in_line[15]
ProteinNumDomains = info_in_line[16]
ProteinNumUniqueDomains = info_in_line[17]
DomainLocation = info_in_line[18]
PositionInDomain = info_in_line[19]
for c in range(numConditions-1):
Ratios[c].append( float(info_in_line[19 + numConditions + c] ) )
Pvalues[c].append( float(info_in_line[18 + 2*numConditions + c] ) )
else: #We've just started looking at a new gene
sigPeptidesByCondition = []
totPeptidesByCondition = []
aonPeptidesByCondition = [] #All or Nothing Peptides, those which are only present (or completely not present) in the refolded sample
allsigPeptidesByCondition = []
for c in range(numConditions-1):
sigPeptides = 0
totPeptides = 0
aonPeptides = 0
allsigPeptides = 0
for (Ratio,Pval) in zip(Ratios[c],Pvalues[c]):
if Ratio != 0 and Pval != 0:
totPeptides += 1
if (6 > np.abs(Ratio) > 1 and Pval > 2):
sigPeptides += 1
allsigPeptides += 1
if np.abs(Ratio) > 6 and Pval > 1.8:
aonPeptides += 1
allsigPeptides += 1
sigPeptidesByCondition.append( sigPeptides )
totPeptidesByCondition.append( totPeptides )
aonPeptidesByCondition.append( aonPeptides )
allsigPeptidesByCondition.append( allsigPeptides )
Pvalues = [[] for c in range(numConditions-1)]
Ratios = [[] for c in range(numConditions-1)]
protein_summary_file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t' %(Accession,GeneName,Location,Composition,Cofactors,Essential,CopyNumber,ProteinMW,fg,numsubunits,uniquesubunits,pI,ProteinFolds,ProteinDomainRanges,ProteinNumDomains,ProteinNumUniqueDomains) + '\t'.join(str(s)+'\t'+str(t)+'\t'+str(a)+'\t'+str(aS) for (s,t,a,aS) in zip(sigPeptidesByCondition,totPeptidesByCondition,aonPeptidesByCondition,allsigPeptidesByCondition)) + '\n' )
Accession = info_in_line[0]
GeneName = info_in_line[1]
Location = info_in_line[4]
Composition = info_in_line[5]
Cofactors = info_in_line[6]
Essential = info_in_line[7]
CopyNumber = info_in_line[8]
ProteinMW = info_in_line[9]
fg = info_in_line[10]
numsubunits = info_in_line[11]
uniquesubunits = info_in_line[12]
pI = info_in_line[13]
ProteinFolds = info_in_line[14]
ProteinDomainRanges = info_in_line[15]
ProteinNumDomains = info_in_line[16]
ProteinNumUniqueDomains = info_in_line[17]
DomainLocation = info_in_line[18]
PositionInDomain = info_in_line[19]
for c in range(numConditions-1):
Ratios[c].append( float(info_in_line[19 + numConditions + c] ) )
Pvalues[c].append( float(info_in_line[18 + 2*numConditions + c] ) )
sigPeptidesByCondition = []
totPeptidesByCondition = []
aonPeptidesByCondition = []
allsigPeptidesByCondition = []
for c in range(numConditions-1):
sigPeptides = 0
totPeptides = 0
aonPeptides = 0
allsigPeptides = 0
for (Ratio,Pval) in zip(Ratios[c],Pvalues[c]):
if Ratio != 0 and Pval != 0:
totPeptides += 1
if (6 > np.abs(Ratio) > 1 and Pval > 2):
sigPeptides += 1
allsigPeptides += 1
if np.abs(Ratio) > 6 and Pval > 1.8:
aonPeptides += 1
allsigPeptides += 1
sigPeptidesByCondition.append( sigPeptides )
totPeptidesByCondition.append( totPeptides )
aonPeptidesByCondition.append( aonPeptides )
allsigPeptidesByCondition.append( allsigPeptides )
protein_summary_file.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t' %(Accession,GeneName,Location,Composition,Cofactors,Essential,CopyNumber,ProteinMW,fg,numsubunits,uniquesubunits,pI,ProteinFolds,ProteinDomainRanges,ProteinNumDomains,ProteinNumUniqueDomains) + '\t'.join(str(s)+'\t'+str(t)+'\t'+str(a)+'\t'+str(aS) for (s,t,a,aS) in zip(sigPeptidesByCondition,totPeptidesByCondition,aonPeptidesByCondition,allsigPeptidesByCondition)) + '\n' )
protein_summary_file.close()
peptideLip_output_file.close()
#Sort the peptide file on the Domain Location so that way we can use the same ordering feature to make a domain-level output
peptideLip_output_df = pd.read_csv( peptideLip_output_fn , sep='\t', header='infer')
peptideLip_output_df = peptideLip_output_df.sort_values(by=['GeneName','Domain Location','NormPeptideRatio1'])
peptideLip_output_df.to_csv( peptideLip_output_fn , sep='\t', index=False)
peptideLip_output_file = open( peptideLip_output_fn, 'r' )
domain_summary_fn = peptideLip_fn.split('.')[0] + '_summary18Domain.txt'
domain_summary_file = open( domain_summary_fn, 'w')
DomainLocation = ''
Pvalues = [[] for c in range(numConditions-1)]
Ratios = [[] for c in range(numConditions-1)]
next(peptideLip_output_file)
domain_summary_file.write( 'Accession\tDomain Location\tFold\tDomain Order\t' + '\t'.join('SigPeptides'+str(s)+'\t'+'TotPeptides'+str(s)+'\t'+'AonPeptides'+str(s)+'\t'+'AllSigPeptides'+str(s) for s in range(1,numConditions)) + '\n' )
for line in peptideLip_output_file:
info_in_line = line.split('\t')
if info_in_line[18] == DomainLocation: #we're on the same domain location
for c in range(numConditions-1):
Ratios[c].append( float(info_in_line[19 + numConditions + c] ) )
Pvalues[c].append( float(info_in_line[18 + 2*numConditions + c] ) )
else: #we've started looking at a new gene
if DomainLocation == '': #We've just started; this was the first gene
Accession = info_in_line[0]
GeneName = info_in_line[1]
DomainLocation = info_in_line[18]
ProteinFolds = info_in_line[14]
if ProteinFolds != 'Unknown':
ProteinFolds = ast.literal_eval( info_in_line[14] )
ProteinDomainRanges = ast.literal_eval( info_in_line[15] )
if type(ProteinFolds) != list:
try:
ProteinFolds = ast.literal_eval( ProteinFolds )
except SyntaxError:
ProteinFolds = [ 'domain'+str(x) for x in range(len(ProteinDomainRanges))] #for one edge case, ushA, where the quotation remarks refuse to get formatted into a list no matter what I try!
#Find the particular fold and order of domain (in sequence)
domainRange = re.sub( '[^0-9-,]', '', DomainLocation)
if domainRange != '':
try:
thisFold = ProteinFolds[ ProteinDomainRanges.index(domainRange) ]
ProteinDomainStartPoints = [ int(z.split('-')[0]) for z | |
<reponame>SaladDais/LSO2-VM-Performance<filename>indra/llcorehttp/tests/test_llcorehttp_peer.py
#!/usr/bin/env python
"""\
@file test_llsdmessage_peer.py
@author <NAME>
@date 2008-10-09
@brief This script asynchronously runs the executable (with args) specified on
the command line, returning its result code. While that executable is
running, we provide dummy local services for use by C++ tests.
$LicenseInfo:firstyear=2008&license=viewerlgpl$
Second Life Viewer Source Code
Copyright (C) 2012-2013, Linden Research, Inc.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License only.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
$/LicenseInfo$
"""
import os
import sys
import time
import select
import getopt
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from llbase.fastest_elementtree import parse as xml_parse
from llbase import llsd
# we're in llcorehttp/tests ; testrunner.py is found in llmessage/tests
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
"llmessage", "tests"))
from testrunner import freeport, run, debug, VERBOSE
class TestHTTPRequestHandler(BaseHTTPRequestHandler):
    """This subclass of BaseHTTPRequestHandler is to receive and echo
    LLSD-flavored messages sent by the C++ LLHTTPClient.
    Target URLs are fairly free-form and are assembled by
    concatinating fragments. Currently defined fragments
    are:
    - '/reflect/'  Request headers are bounced back to caller
                   after prefixing with 'X-Reflect-'
    - '/fail/'     Body of request can contain LLSD with
                   'reason' string and 'status' integer
                   which will become response header.
    - '/bug2295/'  206 response, no data in body:
    -- '/bug2295/0/'  "Content-Range: bytes 0-75/2983"
    -- '/bug2295/1/'  "Content-Range: bytes 0-75/*"
    -- '/bug2295/2/'  "Content-Range: bytes 0-75/2983",
                      "Content-Length: 0"
    -- '/bug2295/00000012/0/'  Generates PARTIAL_FILE (18) error in libcurl
                      (path segment is hex: 0x12 == 18; the handler below
                      matches '00000012').
                      "Content-Range: bytes 0-75/2983",
                      "Content-Length: 76"
    -- '/bug2295/inv_cont_range/0/'  Generates HE_INVALID_CONTENT_RANGE error in llcorehttp.
    - '/503/'      Generate 503 responses with various kinds
                   of 'retry-after' headers
    -- '/503/0/'   "Retry-After: 2"
    -- '/503/1/'   "Retry-After: Thu, 31 Dec 2043 23:59:59 GMT"
    -- '/503/2/'   "Retry-After: Fri, 31 Dec 1999 23:59:59 GMT"
    -- '/503/3/'   "Retry-After: "
    -- '/503/4/'   "Retry-After: (*#*(@*(@(")"
    -- '/503/5/'   "Retry-After: aklsjflajfaklsfaklfasfklasdfklasdgahsdhgasdiogaioshdgo"
    -- '/503/6/'   "Retry-After: 1 2 3 4 5 6 7 8 9 10"
    Some combinations make no sense, there's no effort to protect
    you from that.
    """
    # Deliberately broad: this is a test double, and a failure inside a
    # handler should be reported but must never kill the server process.
    ignore_exceptions = (Exception,)
    def read(self):
        # Return the raw request body as a string ('' when no/invalid
        # content-length header is present).
        # The following logic is adapted from the library module
        # SimpleXMLRPCServer.py.
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        try:
            size_remaining = int(self.headers["content-length"])
        except (KeyError, ValueError):
            return ""
        max_chunk_size = 10*1024*1024
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            chunk = self.rfile.read(chunk_size)
            L.append(chunk)
            size_remaining -= len(chunk)
        return ''.join(L)
        # end of swiped read() logic
    def read_xml(self):
        # Parse the request body as LLSD and return the Python value.
        # This approach reads the entire POST data into memory first
        return llsd.parse(self.read())
##         # This approach attempts to stream in the LLSD XML from self.rfile,
##         # assuming that the underlying XML parser reads its input file
##         # incrementally. Unfortunately I haven't been able to make it work.
##         tree = xml_parse(self.rfile)
##         debug("Finished raw parse")
##         debug("parsed XML tree %s", tree)
##         debug("parsed root node %s", tree.getroot())
##         debug("root node tag %s", tree.getroot().tag)
##         return llsd.to_python(tree.getroot())
    def do_HEAD(self):
        # HEAD is handled as a GET that should suppress the body.
        # NOTE(review): withdata=False is accepted by do_GET but never
        # forwarded to answer(), so HEAD responses still carry a body —
        # confirm whether that is intended.
        self.do_GET(withdata=False)
    def do_GET(self, withdata=True):
        # Of course, don't attempt to read data.
        try:
            self.answer(dict(reply="success", status=200,
                             reason="Your GET operation worked"))
        except self.ignore_exceptions, e:
            print >> sys.stderr, "Exception during GET (ignoring): %s" % str(e)
    def do_POST(self):
        # Read the provided POST data and echo it back in the 'reason' field.
        # self.answer(self.read())
        try:
            self.answer(dict(reply="success", status=200,
                             reason=self.read()))
        except self.ignore_exceptions, e:
            print >> sys.stderr, "Exception during POST (ignoring): %s" % str(e)
    def do_PUT(self):
        # Read the provided PUT data and echo it back in the 'reason' field.
        # self.answer(self.read())
        try:
            self.answer(dict(reply="success", status=200,
                             reason=self.read()))
        except self.ignore_exceptions, e:
            print >> sys.stderr, "Exception during PUT (ignoring): %s" % str(e)
    def answer(self, data, withdata=True):
        # Central response dispatcher: inspects self.path for the special
        # fragments documented in the class docstring and builds the reply.
        debug("%s.answer(%s): self.path = %r", self.__class__.__name__, data, self.path)
        if "/sleep/" in self.path:
            # Simulate a slow server (used to exercise client-side timeouts).
            time.sleep(30)
        if "/503/" in self.path:
            # Tests for various kinds of 'Retry-After' header parsing
            body = None
            if "/503/0/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "2")
            elif "/503/1/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "Thu, 31 Dec 2043 23:59:59 GMT")
            elif "/503/2/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "Fri, 31 Dec 1999 23:59:59 GMT")
            elif "/503/3/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "")
            elif "/503/4/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "(*#*(@*(@(")
            elif "/503/5/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "aklsjflajfaklsfaklfasfklasdfklasdgahsdhgasdiogaioshdgo")
            elif "/503/6/" in self.path:
                self.send_response(503)
                self.send_header("retry-after", "1 2 3 4 5 6 7 8 9 10")
            else:
                # Unknown request
                self.send_response(400)
                body = "Unknown /503/ path in server"
            if "/reflect/" in self.path:
                self.reflect_headers()
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            if body:
                self.wfile.write(body)
        elif "/bug2295/" in self.path:
            # Test for https://jira.secondlife.com/browse/BUG-2295
            #
            # Client can receive a header indicating data should
            # appear in the body without actually getting the body.
            # Library needs to defend against this case.
            #
            body = None
            if "/bug2295/0/" in self.path:
                self.send_response(206)
                self.send_header("Content-Range", "bytes 0-75/2983")
            elif "/bug2295/1/" in self.path:
                self.send_response(206)
                self.send_header("Content-Range", "bytes 0-75/*")
            elif "/bug2295/2/" in self.path:
                self.send_response(206)
                self.send_header("Content-Range", "bytes 0-75/2983")
                self.send_header("Content-Length", "0")
            elif "/bug2295/00000012/0/" in self.path:
                # Advertises 76 bytes but sends none: libcurl reports
                # PARTIAL_FILE (error 18, hex 0x12 in the path).
                self.send_response(206)
                self.send_header("Content-Range", "bytes 0-75/2983")
                self.send_header("Content-Length", "76")
            elif "/bug2295/inv_cont_range/0/" in self.path:
                self.send_response(206)
                self.send_header("Content-Range", "bytes 0-75/2983")
                body = "Some text, but not enough."
            else:
                # Unknown request
                self.send_response(400)
            if "/reflect/" in self.path:
                self.reflect_headers()
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            if body:
                self.wfile.write(body)
        elif "fail" not in self.path:
            data = data.copy()      # we're going to modify
            # Ensure there's a "reply" key in data, even if there wasn't before
            data["reply"] = data.get("reply", llsd.LLSD("success"))
            response = llsd.format_xml(data)
            debug("success: %s", response)
            self.send_response(200)
            if "/reflect/" in self.path:
                self.reflect_headers()
            self.send_header("Content-type", "application/llsd+xml")
            self.send_header("Content-Length", str(len(response)))
            self.send_header("X-LL-Special", "Mememememe");
            self.end_headers()
            if withdata:
                self.wfile.write(response)
        else:                       # fail requested
            status = data.get("status", 500)
            # self.responses maps an int status to a (short, long) pair of
            # strings. We want the longer string. That's why we pass a string
            # pair to get(): the [1] will select the second string, whether it
            # came from self.responses or from our default pair.
            reason = data.get("reason",
                              self.responses.get(status,
                                                 ("fail requested",
                                                  "Your request specified failure status %s "
                                                  "without providing a reason" % status))[1])
            debug("fail requested: %s: %r", status, reason)
            self.send_error(status, reason)
            if "/reflect/" in self.path:
                self.reflect_headers()
                self.end_headers()
    def reflect_headers(self):
        # Echo every request header back as a response header, prefixed with
        # 'X-Reflect-', so the client can verify what it actually sent.
        for name in self.headers.keys():
            # print "Header:  %s: %s" % (name, self.headers[name])
            self.send_header("X-Reflect-" + name, self.headers[name])
    if not VERBOSE:
        # When VERBOSE is set, skip both these overrides because they exist to
        # suppress output.
        def log_request(self, code, size=None):
            # For present purposes, we don't want the request splattered onto
            # stderr, as it would upset devs watching the test run
            pass
        def log_error(self, format, *args):
            # Suppress error output as well
            pass
class Server(HTTPServer):
# This pernicious flag is on by default in HTTPServer. But proper
# operation of freeport() absolutely depends on it being off.
allow_reuse_address = False
# Override of BaseServer.handle_error(). Not too interested
# in errors and the default handler emits a scary traceback
# to stderr which annoys some. Disable this override to get
# default behavior which *shouldn't* cause the program to return
# a failure status.
def handle_error(self, request, client_address):
print '-'*40
print 'Ignoring exception during processing of request from',
print client_address
print '-'*40
if __name__ == "__main__":
do_valgrind = False
path_search = False
options, args = getopt.getopt(sys.argv[1:], "V", ["valgrind"])
for option, value in options:
if option == "-V" or option == "--valgrind":
do_valgrind = True
# function to make a server with specified port
make_server = lambda port: Server(('127.0.0.1', port), TestHTTPRequestHandler)
if not sys.platform.startswith("win"):
# Instantiate a Server(TestHTTPRequestHandler) on a port chosen by the
# runtime.
httpd = make_server(0)
else:
# "Then there's Windows"
# Instantiate a Server(TestHTTPRequestHandler) on the first free port
# in the specified port range.
httpd, port = freeport(xrange(8000, 8020), make_server)
# Pass the selected port number to the subject test program via the
# environment. We don't want to impose requirements on the test program's
# command-line | |
= True
for DS_i in range(1, 16):
if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):
empty = False
break
if empty:
cmp_to_drop.append(cmp)
df_db.drop(cmp_to_drop, axis=0, inplace=True)
cmp_kept = df_db.index.get_level_values(0).unique()
cmp_to_drop = []
for cmp in meta_dict.keys():
if cmp not in cmp_kept:
cmp_to_drop.append(cmp)
for cmp in cmp_to_drop:
del meta_dict[cmp]
# convert to optimal datatypes to reduce file size
df_db = df_db.convert_dtypes()
df_db = base.convert_to_SimpleIndex(df_db, 0)
# rename the index
df_db.index.name = "ID"
# save the consequence data
df_db.to_csv(target_data_file)
# save the metadata
with open(target_meta_file, 'w+') as f:
json.dump(meta_dict, f, indent=2)
print("Successfully parsed and saved the injury consequence data from FEMA "
"P58")
def create_FEMA_P58_bldg_redtag_db(source_file,
                                   target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv',
                                   target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'):
    """
    Create a red tag consequence parameter database based on the FEMA P58 data
    The method was developed to process v3.1.2 of the FragilityDatabase xls
    that is provided with FEMA P58 2nd edition.
    Parameters
    ----------
    source_file: string
        Path to the fragility database file.
    target_data_file: string
        Path where the consequence data file should be saved. A csv file is
        expected.
    target_meta_file: string
        Path where the consequence metadata should be saved. A json file is
        expected.
    """
    # parse the source file
    df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,
                       true_values=["YES", "Yes", "yes"],
                       false_values=["NO", "No", "no"])
    # take another pass with booleans because the first does not always work
    for true_str in ["YES", "Yes", "yes"]:
        df.replace(true_str, True, inplace=True)
    for false_str in ["NO", "No", "no"]:
        df.replace(false_str, False, inplace=True)
    # remove empty rows and columns
    df.dropna(axis=0, how='all', inplace=True)
    df.dropna(axis=1, how='all', inplace=True)
    # filter the columns we need for the red tag database
    # (unsafe-placard flag plus damage median/dispersion per damage state)
    cols_to_db = [
        'DS Hierarchy',
    ]
    for DS_i in range(1, 6):
        cols_to_db += [
            f'DS {DS_i}, Unsafe Placard Trigger Flag',
            f'DS {DS_i}, Unsafe Placard Damage Median',
            f'DS {DS_i}, Unsafe Placard Damage Dispersion'
        ]
    # filter the columns that we need for the metadata
    cols_to_meta = [
        "Component Name",
        "Component Description",
        "Construction Quality:",
        "Seismic Installation Conditions:",
        "Comments / Notes",
        "Author",
        "Fragility Unit of Measure",
        "Round to Integer Unit?",
        "DS 1, Description",
        "DS 2, Description",
        "DS 3, Description",
        "DS 4, Description",
        "DS 5, Description",
    ]
    # remove special characters to make it easier to work with column names
    str_map = {
        ord(' '): "_",
        ord('.'): "_",
        ord('-'): "_",
        ord(':'): None,
        ord('('): None,
        ord(')'): None,
        ord('?'): None,
        ord('/'): None,
        ord(','): None,
    }
    df_db_source = df.loc[:, cols_to_db]
    df_db_source.columns = [s.translate(str_map) for s in cols_to_db]
    df_db_source.sort_index(inplace=True)
    df_meta = df.loc[:, cols_to_meta]
    df_meta.columns = [s.translate(str_map) for s in cols_to_meta]
    # placeholder cells carry no usable parameter values
    df_db_source.replace('BY USER', np.nan, inplace=True)
    df_db_source.replace('By User', np.nan, inplace=True)
    # initialize the output loss table
    # define the columns
    out_cols = [
        "Incomplete",
    ]
    for DS_i in range(1, 6):
        out_cols += [
            f"DS{DS_i}-Family",
            f"DS{DS_i}-Theta_0",
            f"DS{DS_i}-Theta_1"
        ]
    # create the database index
    comps = df_db_source.index.values
    df_db = pd.DataFrame(
        columns=out_cols,
        index=comps,
        dtype=float
    )
    # initialize the dictionary that stores the loss metadata
    meta_dict = {}
    # for each component...
    # (this approach is not efficient, but easy to follow which was considered
    # more important than efficiency.)
    for cmp in df_db_source.itertuples():
        # assume the component information is complete
        incomplete = False
        # get the raw metadata for the component
        cmp_meta = df_meta.loc[cmp.Index, :]
        # store the global (i.e., not DS-specific) metadata
        # every component is assumed to have a comp. description
        comments = cmp_meta['Component_Description']
        # the additional fields are added to the description if they exist
        if cmp_meta['Construction_Quality'] != 'Not Specified':
            comments += f'\nConstruction Quality: ' \
                        f'{cmp_meta["Construction_Quality"]}'
        if cmp_meta['Seismic_Installation_Conditions'] not in [
                'Not Specified', 'Not applicable', 'Unknown', 'Any']:
            comments += f'\nSeismic Installation Conditions: ' \
                        f'{cmp_meta["Seismic_Installation_Conditions"]}'
        if cmp_meta['Comments__Notes'] != 'None':
            comments += f'\nNotes: {cmp_meta["Comments__Notes"]}'
        if cmp_meta['Author'] not in ['Not Given', 'By User']:
            comments += f'\nAuthor: {cmp_meta["Author"]}'
        # get the suggested block size and replace the misleading values with ea
        # NOTE(review): [::-1] swaps the "<count> <unit>" word order from the
        # source sheet — confirm the sheet format against the injury parser.
        block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]
        meta_data = {
            "Description": cmp_meta['Component_Name'],
            "Comments": comments,
            "SuggestedComponentBlockSize": ' '.join(block_size),
            "RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'],
            "ControllingDemand": "Damage Quantity",
            "DamageStates": {}
        }
        # Handle components with simultaneous damage states separately
        if 'Simul' in cmp.DS_Hierarchy:
            pass
            # Note that we are assuming that components with simultaneous
            # damage states do not have damage that would trigger a red tag.
            # This assumption holds for the second edition of FEMA P58, but it
            # might need to be revisited in future editions.
        # for every other component...
        else:
            # now look at each Damage State
            for DS_i in range(1, 6):
                redtag_flag = getattr(
                    cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag')
                if redtag_flag is True:
                    theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'
                                           f'Median')
                    theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'
                                           f'Dispersion')
                    # NaN != 0.0 evaluates True, so missing medians also pass
                    # here; they are flagged as incomplete just below.
                    if theta_0 != 0.0:
                        df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal'
                        df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0
                        df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1
                        if (pd.isna(theta_0) or pd.isna(theta_1)):
                            incomplete = True
                # np.isnan returns a NumPy bool, so '~' acts as a logical NOT:
                # record DS metadata whenever the flag cell was not empty.
                # NOTE(review): relies on redtag_flag being True/False/NaN —
                # confirm the Excel parse never yields other types here.
                if ~np.isnan(redtag_flag):
                    meta_data['DamageStates'].update({
                        f"DS{DS_i}": {"Description":
                                      cmp_meta[f"DS_{DS_i}_Description"]}})
        df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)
        # store the metadata for this component
        meta_dict.update({cmp.Index: meta_data})
    # review the database and drop rows with no information
    cmp_to_drop = []
    for cmp in df_db.index:
        empty = True
        for DS_i in range(1, 6):
            if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):
                empty = False
                break
        if empty:
            cmp_to_drop.append(cmp)
    df_db.drop(cmp_to_drop, axis=0, inplace=True)
    cmp_kept = df_db.index.get_level_values(0).unique()
    # drop the metadata of the components that were removed above
    cmp_to_drop = []
    for cmp in meta_dict.keys():
        if cmp not in cmp_kept:
            cmp_to_drop.append(cmp)
    for cmp in cmp_to_drop:
        del meta_dict[cmp]
    # convert to optimal datatypes to reduce file size
    df_db = df_db.convert_dtypes()
    # rename the index
    df_db.index.name = "ID"
    # save the consequence data
    df_db.to_csv(target_data_file)
    # save the metadata
    with open(target_meta_file, 'w+') as f:
        json.dump(meta_dict, f, indent=2)
    print("Successfully parsed and saved the red tag consequence data from FEMA "
          "P58")
def create_Hazus_EQ_fragility_db(source_file,
target_data_file='fragility_DB_Hazus_EQ.csv',
target_meta_file='fragility_DB_Hazus_EQ.json'):
"""
Create a database file based on the HAZUS EQ Technical Manual
This method was developed to process a json file with tabulated data from
v4.2.3 of the Hazus Earthquake Technical Manual. The json file is included
in the resources folder of pelicun
Parameters
----------
source_file: string
Path to the fragility database file.
target_data_file: string
Path where the fragility data file should be saved. A csv file is
expected.
target_meta_file: string
Path where the fragility metadata should be saved. A json file is
expected.
"""
# parse the source file
with open(source_file, 'r') as f:
raw_data = json.load(f)
# prepare lists of labels for various building features
design_levels = list(
raw_data['Structural_Fragility_Groups']['EDP_limits'].keys())
building_types = list(
raw_data['Structural_Fragility_Groups']['P_collapse'].keys())
convert_design_level = {
'High_code': 'HC',
'Moderate_code': 'MC',
'Low_code': 'LC',
'Pre_code': 'PC'
}
# initialize the fragility table
df_db = pd.DataFrame(
columns=[
"ID",
"Incomplete",
"Demand-Type",
"Demand-Unit",
"Demand-Offset",
"Demand-Directional",
"LS1-Family",
"LS1-Theta_0",
"LS1-Theta_1",
"LS1-DamageStateWeights",
"LS2-Family",
"LS2-Theta_0",
"LS2-Theta_1",
"LS2-DamageStateWeights",
"LS3-Family",
"LS3-Theta_0",
"LS3-Theta_1",
"LS3-DamageStateWeights",
"LS4-Family",
"LS4-Theta_0",
"LS4-Theta_1",
"LS4-DamageStateWeights"
],
index=np.arange(len(building_types) * len(design_levels) * 5),
dtype=float
)
counter = 0
# First, prepare the structural fragilities
S_data = raw_data['Structural_Fragility_Groups']
for bt in building_types:
for dl in design_levels:
if bt in S_data['EDP_limits'][dl].keys():
# create the component id
cmp_id = f'STR.{bt}.{convert_design_level[dl]}'
df_db.loc[counter, 'ID'] = cmp_id
# store demand specifications
df_db.loc[counter, 'Demand-Type'] = "Peak Roof Drift Ratio"
df_db.loc[counter, 'Demand-Unit'] = "rad"
df_db.loc[counter, 'Demand-Offset'] = 0
# store the Limit State parameters
for LS_i in range(1, 5):
df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'
df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \
S_data['EDP_limits'][dl][bt][LS_i - 1]
df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \
S_data['Fragility_beta'][dl]
if LS_i == 4:
p_coll = S_data['P_collapse'][bt]
df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (
f'{1.0 - p_coll} | {p_coll}')
counter += 1
# Second, the non-structural drift sensitive one
NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']
# create the component id
df_db.loc[counter, 'ID'] = 'NSD'
# store demand specifications
df_db.loc[counter, 'Demand-Type'] = "Peak Roof Drift Ratio"
df_db.loc[counter, 'Demand-Unit'] = "rad"
df_db.loc[counter, 'Demand-Offset'] = 0
# store the Limit State parameters
for LS_i in range(1, 5):
df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'
df_db.loc[counter, f'LS{LS_i}-Theta_0'] = NSD_data['EDP_limits'][
LS_i - 1]
df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSD_data['Fragility_beta']
counter += 1
# Third, the non-structural acceleration sensitive fragilities
NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']
for dl in design_levels:
# create the component id
cmp_id = f'NSA.{convert_design_level[dl]}'
df_db.loc[counter, 'ID'] = cmp_id
# store demand specifications
df_db.loc[counter, 'Demand-Type'] = "Peak Floor Acceleration"
df_db.loc[counter, 'Demand-Unit'] = "g"
df_db.loc[counter, 'Demand-Offset'] = 1
# store the Limit State parameters
for LS_i in range(1, 5):
df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'
df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \
NSA_data['EDP_limits'][dl][LS_i - 1]
df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSA_data['Fragility_beta']
counter += 1
# Fourth, the lifeline facilities
LF_data = | |
from config import TOKEN, unknown, stickers, FAQ_list_inline, list_answers
from states import SVO
from aiogram.dispatcher import FSMContext
from aiogram import Bot, Dispatcher, executor, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.types import message, ReplyKeyboardRemove
import logging, asyncio, string, random, time, kb, re
# Verbose logging so every update and handler call shows up on the console.
logging.basicConfig(level=logging.INFO)
# In-memory FSM storage: conversation state does not survive a restart.
storage = MemoryStorage()
bot = Bot(token=TOKEN)
dp = Dispatcher(bot, storage=storage)
# handler for the /start command
@dp.message_handler(commands=['start'])
async def start(message: types.Message):
    """Greet a new user: send the hello sticker and point at the main menu."""
    greeting = (
        '👋 Привет, {0.first_name}!'
        '\nЯ — HR-бот аэропорта Шереметьево. Помогаю адаптироваться новым сотрудникам. Чтобы начать пользоваться сервисом, перейди в Главное меню, нажав на кнопку'
    ).format(message.from_user)
    await bot.send_sticker(message.chat.id, stickers('hello'))
    await message.answer(greeting, reply_markup=kb.reply_main)
# handler for all other (non-command) text messages
@dp.message_handler()
async def main(message: types.Message, state: FSMContext):
    """Route main-menu button presses.

    Known buttons: main menu, ask a question (enters SVO.question), show the
    FAQ topics, ask again (same FAQ menu), and back.  Anything else gets a
    shrug sticker plus a random "don't understand" reply.
    """
    async def _show_faq_menu():
        # Shared by the FAQ button and the "ask again" button (their bodies
        # were previously duplicated verbatim): show the inline topic list
        # plus the back-button hint, and remember both message ids so the
        # FAQ callback can delete them later.
        send = await bot.send_message(
            message.chat.id,
            'Чтобы получить ответ, выбери тему интересующей информации:',
            reply_markup=kb.inline_faq)
        back = await bot.send_message(
            message.chat.id,
            'Хочешь вернуться назад, нажми на кнопку',
            reply_markup=kb.reply_back)
        async with state.proxy() as data:
            data["delete_id"] = back.message_id
            data["message_id"] = send.message_id

    if message.text == "🏠 Главное меню":
        await message.answer('Ты в Главном меню, выбери что ты хочешь',
                             reply_markup=kb.reply_menu)
    elif message.text == '✏️ Задать вопрос':
        await message.answer('Введи мне свой вопрос, хочешь вернуться назад, нажми на кнопку',
                             reply_markup=kb.reply_back)
        await SVO.question.set()
    elif message.text in ('📝 Часто задаваемые вопросы', '🔄 Задать вопрос снова'):
        await _show_faq_menu()
    elif message.text == '◀️ Назад':
        await message.answer('Ты в Главном меню, выбери что ты хочешь',
                             reply_markup=kb.reply_menu)
        try:
            # Remove the FAQ topic message if one is still on screen.
            async with state.proxy() as data:
                mes = data["message_id"]
                await bot.delete_message(chat_id=message.chat.id,
                                         message_id=mes)
        except KeyError:
            print("Error: message_id not found")
    else:
        # Unrecognized input: shrug sticker + a random canned reply.
        unknown_ = stickers('unknown')
        await bot.send_sticker(message.chat.id, unknown_)
        await message.answer(random.choice(unknown),
                             reply_markup=kb.reply_menu)
# handler for the ask-a-question state
@dp.message_handler(state=SVO.question, content_types=types.ContentTypes.TEXT)
async def question(message: types.Message, state: FSMContext):
    """Match a free-form question against keyword stems and send the canned
    answer for the first matching topic.

    The rules below preserve the exact order of the original if/elif chain,
    so overlapping keywords resolve identically.  Modes: 'more0'/'more4'
    attach the extra inline keyboards and do not enter the "ask again"
    state; 'html' enables HTML parse mode; 'plain' is the default reply.
    The original computed re.findall for all ~25 stems up front — re.search
    per stem, stopping at the first hit, gives the same booleans cheaper.
    """
    followup = '\n\nМожешь еще раз задать вопрос или вернуться в Главное меню'
    rules = [
        (('парк', 'мест'), 0, 'more0'),
        (('телефон', 'сроч', 'спасат', 'служб'), 1, 'plain'),
        (('скидк', 'сотрудник', 'работник'), 2, 'html'),
        (('настав',), 3, 'plain'),
        (('маршрут', 'корпорат', 'транспорт'), 4, 'more4'),
        (('проход', 'обуче', 'охран', 'труда'), 5, 'plain'),
        (('событ', 'испыт', 'срок'), 6, 'plain'),
        (('пропуск',), 7, 'plain'),
        (('карт', 'медицин', 'страхов'), 8, 'plain'),
    ]
    question = message.text
    async with state.proxy() as data:
        data["question1"] = question
        answer = data["question1"]
        if answer == '◀️ Назад':
            await message.answer('Ты в Главном меню, выбери что ты хочешь',
                                 reply_markup=kb.reply_menu)
            await state.reset_state()
        else:
            for stems, idx, mode in rules:
                if any(re.search(stem, answer, flags=re.I) for stem in stems):
                    if mode == 'more0':
                        await message.answer(list_answers[idx],
                                             reply_markup=kb.inline_more0)
                        await state.reset_state()
                    elif mode == 'more4':
                        await message.answer(list_answers[idx],
                                             reply_markup=kb.inline_more4)
                        await state.reset_state()
                    elif mode == 'html':
                        await message.answer(list_answers[idx] + followup,
                                             reply_markup=kb.reply_again_main,
                                             parse_mode='html')
                        await state.reset_state()
                        await SVO.again.set()
                    else:
                        await message.answer(list_answers[idx] + followup,
                                             reply_markup=kb.reply_again_main)
                        await state.reset_state()
                        await SVO.again.set()
                    break
            else:
                # No topic matched: apologise and offer the retry keyboard.
                await message.answer(f'{random.choice(unknown)}\n'
                                     'Выбери одну из кнопок',
                                     reply_markup=kb.reply_again_main)
                await state.reset_state()
                await SVO.again.set()
# handler for the "ask again / main menu" follow-up keyboard
@dp.message_handler(state=SVO.again, content_types=types.ContentTypes.TEXT)
async def again(message: types.Message, state: FSMContext):
    """After an answer was shown, either restart the question flow, return
    to the main menu, or nudge the user back onto the buttons."""
    choice = message.text
    async with state.proxy() as data:
        data["again1"] = choice
        text = data["again1"]
        # Pick the reply, keyboard, and (optional) next FSM state up front,
        # then perform the common answer/reset/set sequence once.
        if text == '🔄 Задать вопрос снова':
            reply = 'Введи мне свой вопрос снова, хочешь вернуться назад, нажми на кнопку'
            markup, next_state = kb.reply_back, SVO.question
        elif text == '🏠 Главное меню':
            reply = 'Ты в Главном меню, выбери что ты хочешь'
            markup, next_state = kb.reply_menu, None
        else:
            reply = f'{random.choice(unknown)}\nВыбери одну из кнопок'
            markup, next_state = kb.reply_again_main, SVO.again
        await message.answer(reply, reply_markup=markup)
        await state.reset_state()
        if next_state is not None:
            await next_state.set()
# callback handler for the inline FAQ topic buttons
@dp.callback_query_handler(lambda message: message.data.startswith("FAQ"))
async def faq(callback_query: types.CallbackQuery, state: FSMContext):
    """Answer the FAQ topic picked from the inline keyboard.

    Deletes the menu pair stored by the FAQ flow (topic list + back hint),
    then sends the canned answer.  Topics 0 and 4 attach their extra inline
    keyboards; topic 2 needs HTML parse mode; topics 1,3,5-8 previously had
    six byte-identical elif branches, collapsed into one here.
    """
    await bot.answer_callback_query(callback_query.id)
    number = int(callback_query.data[3:])  # callback data is "FAQ<index>"
    answer = FAQ_list_inline[number][1]
    user_id = callback_query.from_user.id
    async with state.proxy() as data:
        mes = data["message_id"]
        delete = data["delete_id"]
        await bot.delete_message(chat_id=callback_query.message.chat.id,
                                 message_id=mes)
        await bot.delete_message(chat_id=callback_query.message.chat.id,
                                 message_id=delete)
    # brief pause so the reply keyboard renders correctly on the client
    await asyncio.sleep(0.1)
    await bot.send_message(user_id,
                           f'Ответ на тему интересующей информации: "{FAQ_list_inline[number][0]}"',
                           reply_markup=kb.reply_again_main)
    if number == 0:
        await bot.send_message(user_id, answer,
                               reply_markup=kb.inline_more0)
    elif number == 4:
        await bot.send_message(user_id, answer,
                               reply_markup=kb.inline_more4)
    elif 1 <= number <= 8:
        # topic 2 additionally needs HTML parse mode; the rest share one reply
        extra = {'parse_mode': 'html'} if number == 2 else {}
        await bot.send_message(
            user_id,
            answer + '\n\nМожешь еще раз задать вопрос или вернуться в Главное меню',
            reply_markup=kb.reply_again_main, **extra)
# callback handler for the "more" button on question 0 (parking prices)
@dp.callback_query_handler(lambda message: message.data.startswith("more0"))
async def more0(callback_query: types.CallbackQuery):
    """Walk the user through the parking lots one by one: a sticker plus a
    price message per lot, with short pauses so the chat reads naturally."""
    await bot.answer_callback_query(callback_query.id)
    # fetch all four stickers up front, exactly as the original flow did
    sticker_bdc = stickers('BDC')
    sticker_p17 = stickers('P17')
    sticker_p13 = stickers('P13')
    sticker_ef = stickers('EF')
    uid = callback_query.from_user.id
    await bot.send_sticker(uid, sticker_bdc)
    await bot.send_message(uid,
                           'Места внутри паркинга:\n5000 рублей в месяц\n\nМеста на крыше паркинга:\n4000 рублей в месяц',
                           reply_markup=ReplyKeyboardRemove())
    await asyncio.sleep(3)
    await bot.send_sticker(uid, sticker_p17)
    await bot.send_message(uid,
                           'Стоимость абонемента на парковку для сотрудников составляет:\n3000 рублей в месяц\n\nДля членов профсоюза: 2500 рублей в месяц')
    await asyncio.sleep(3.5)
    await bot.send_sticker(uid, sticker_p13)
    await bot.send_message(uid,
                           'Стоимость абонемента для сотрудников составляет:\n2500 рублей в месяц')
    await asyncio.sleep(3.5)
    await bot.send_sticker(uid, sticker_ef)
    await bot.send_message(uid,
                           'Парковка Р4:\nСтоимость абонемента для сотрудников составляет:\n5000 рублей в месяц\n\nПарковка Р6:\nСтоимость абонемента для сотрудников составляет:\n4000 рублей в месяц\n\n'
                           'Можешь еще раз задать вопрос или вернуться в Главное меню',
                           reply_markup=kb.reply_again_main)
# Callback for the inline "More" button of FAQ question 4 (corporate transport).
@dp.callback_query_handler(lambda message: message.data.startswith("more4"))
async def more4(callback_query: types.CallbackQuery):
    """Ask which direction (to or from Sheremetyevo) the user needs and enter
    the ``SVO.transport`` FSM state to await the reply."""
    await bot.answer_callback_query(callback_query.id)
    await bot.send_message(callback_query.from_user.id, 'Выбери, в Шереметьево или из Шереметьево тебе нужно. Хочешь вернуться в Главное меню, нажми на кнопку',
                           reply_markup=kb.reply_transport_main)
    await SVO.transport.set()
# Handler for the chosen corporate-transport direction.
@dp.message_handler(state=SVO.transport, content_types=types.ContentTypes.TEXT)
async def transport(message: types.Message, state: FSMContext):
    """Route the user to the "from" or "to" location dialog depending on the
    direction button pressed, or back to the main menu; on unrecognized input,
    re-prompt and re-enter this state."""
    transport = message.text
    async with state.proxy() as data:
        # The chosen direction is stashed in FSM storage under "transport1".
        data["transport1"] = transport
        text = data["transport1"]
        if text == 'В Шереметьево':
            await message.answer('Выбери своё местоположение, или откуда тебе нужно добраться до Шереметьево',
                                 reply_markup=kb.reply_from_location)
            await state.reset_state()
            await SVO.from_location.set()
        elif text == 'Из Шереметьево':
            await message.answer('Выбери куда тебе нужно',
                                 reply_markup=kb.reply_to_location)
            await state.reset_state()
            await SVO.to_location.set()
        elif text == '🏠 Главное меню':
            await message.answer('Ты в Главном меню, выбери что ты хочешь',
                                 reply_markup=kb.reply_menu)
            await state.reset_state()
        else:
            # Unknown text: send a random "didn't understand" phrase and
            # restart this state so the user can try again.
            await message.answer(f'{random.choice(unknown)}\n'
                                 'Выбери одну из кнопок',
                                 reply_markup=kb.reply_transport_main)
            await state.reset_state()
            await SVO.transport.set()
# handler отправки откуда нужно добраться
@dp.message_handler(state=SVO.from_location, content_types=types.ContentTypes.TEXT)
async def from_location(message: types.Message, state:FSMContext):
from_location = message.text
async with state.proxy() as data:
data["from_location1"] = from_location
text = data["from_location1"]
if text == '◀️ Назад':
await message.answer('Выбери, в Шереметьево или из Шереметьево тебе нужно. Хочешь вернуться в Главное меню, нажми на кнопку',
reply_markup=kb.reply_transport_main)
await state.reset_state()
await SVO.transport.set()
elif text == 'Лобня':
await message.answer('<ins>Маршрут №3</ins>:\nСтанция Лобня - Кафе «Севастополь» — 07:15\n\n'
'<ins>Маршрут №5</ins>:\nСтанция Лобня - Терминал D — 06:30 (по выходным 6:45), 18:50\n\n'
'<ins>Маршрут №6</ins>:\nСтанция Лобня - Терминал E — 08:25 (по будням)\n\n'
'Можешь еще раз задать вопрос или вернуться в Главное меню',
reply_markup=kb.reply_again_main, parse_mode='html')
await state.reset_state()
elif text | |
the
minimum of multiplicitie of the left and right hand side.
'''
items = list(self.__items)
isect = []
for item in other:
try:
items.remove(item)
except ValueError:
pass
else:
isect.append(item)
return frozenmultiset(isect)
def __add__(self, other):
    '''
    Concatenate the multisets: the result is a :class:`frozenmultiset`
    containing every element of ``self`` and of ``other``, with the
    multiplicities of common elements added together.
    '''
    combined = (*self.__items, *tuple(other))
    return frozenmultiset(combined)
def __sub__(self, other):
    '''
    Return a :class:`frozenmultiset` containing the elements of ``self`` with
    the multiplicity of each element reduced by its multiplicity in ``other``,
    never dropping below zero. Elements whose multiplicity reaches zero are
    omitted.
    '''
    remaining = list(self.__items)
    for element in other:
        try:
            remaining.remove(element)
        except ValueError:
            # ``element`` is absent (or its multiplicity is exhausted):
            # the result is truncated at zero, so nothing to do.
            pass
    return frozenmultiset(remaining)
# Pickling, hashing, equality and the container protocol, all delegated to
# the underlying items tuple. ``__key`` is precomputed elsewhere in the
# class (not visible here) -- presumably an order-insensitive digest of the
# items so that equal multisets hash alike; TODO confirm.
__reduce__ = lambda self: (frozenmultiset, (self.__items,))
__hash__ = lambda self: hash(self.__key)
__eq__ = lambda self, other: type(other) is type(self) and self.__key == other.__key
__contains__ = lambda self, item: item in self.__items
__iter__ = lambda self: iter(self.__items)
__len__ = lambda self: len(self.__items)
__bool__ = lambda self: bool(self.__items)
# True when no element of ``other`` occurs in this multiset.
isdisjoint = lambda self, other: not any(item in self.__items for item in other)
__repr__ = __str__ = lambda self: '{}({})'.format(type(self).__name__, list(self.__items))
class _frozenarraymeta(CacheMeta):
    '''Metaclass adding the ``frozenarray[dtype]`` notation: indexing the
    class with a dtype yields a constructor with that dtype pre-bound.'''

    def __getitem__(self, dtype):
        def constructor(value):
            return self(value, dtype=dtype)
        rename = _copyname(src=self, suffix='[{}]'.format(_getname(dtype)))
        return rename(constructor)
class frozenarray(collections.abc.Sequence, metaclass=_frozenarraymeta):
    '''
    An immutable version (and drop-in replacement) of :class:`numpy.ndarray`.

    Besides being immutable, the :class:`frozenarray` differs from
    :class:`numpy.ndarray` in (in)equality tests. Given two
    :class:`frozenarray` objects ``a`` and ``b``, the test ``a == b`` returns
    ``True`` if both arrays are equal in their entirety, including dtype and
    shape, while the same test with :class:`numpy.ndarray` objects would give
    a boolean array with element-wise truth values.

    The constructor with predefined ``dtype`` argument can be generated via
    the notation ``frozenarray[dtype]``. This is shorthand for ``lambda base:
    frozenarray(base, dtype=dtype)``.

    Parameters
    ----------
    base : :class:`numpy.ndarray` or array-like
        The array data.
    dtype
        The dtype of the array or ``None``.
    copy : :class:`bool`
        If ``base`` is a :class:`frozenarray` and the ``dtype`` matches or is
        ``None``, this argument is ignored. If ``base`` is a
        :class:`numpy.ndarray` and the ``dtype`` matches or is ``None`` and
        ``copy`` is ``False``, ``base`` is stored as is. Otherwise ``base`` is
        copied.
    '''

    # Single slot for the wrapped (read-only) ndarray; no per-instance dict.
    __slots__ = '__base'
    # NOTE(review): ``__cache__`` looks like a hook consumed by the CacheMeta
    # metaclass, presumably memoizing these two properties -- confirm against
    # the metaclass definition (not visible here).
    __cache__ = '__nutils_hash__', '__hash__'

    @staticmethod
    def full(shape, fill_value):
        # Zero strides make every element of the view alias ``fill_value``,
        # so the full array costs no memory beyond the scalar itself.
        return frozenarray(numpy.lib.stride_tricks.as_strided(fill_value, shape, [0]*len(shape)), copy=False)

    def __new__(cls, base, dtype=None, copy=True):
        # Normalize ``dtype`` to a plain Python scalar type. The ``strict``
        # variants map to the same storage type but additionally forbid the
        # silent downcasts checked below.
        isstrict = dtype in (strictint, strictfloat)
        if dtype is None:
            pass
        elif dtype == bool:
            dtype = bool
        elif dtype in (int, strictint):
            dtype = int
        elif dtype in (float, strictfloat):
            dtype = float
        elif dtype == complex:
            dtype = complex
        else:
            raise ValueError('unsupported dtype: {!r}'.format(dtype))
        if isinstance(base, frozenarray):
            # Already frozen: reuse the instance when the dtype agrees,
            # otherwise unwrap and fall through to the conversion below.
            if dtype is None or dtype == base.dtype:
                return base
            base = base.__base
        if isstrict:
            if not isinstance(base, numpy.ndarray):
                base = numpy.array(base)
                if base.size == 0:
                    # An empty array holds no values, so casting it to the
                    # requested dtype cannot lose information.
                    base = base.astype(dtype)
                # The array was freshly created above, no further copy needed.
                copy = False
            if base.dtype == complex or base.dtype == float and dtype == int:
                raise ValueError('downcasting {!r} to {!r} is forbidden'.format(base.dtype, dtype))
        self = object.__new__(cls)
        # Copy only when requested or unavoidable (non-ndarray input, or a
        # dtype change); otherwise adopt ``base`` directly and just freeze it.
        self.__base = numpy.array(base, dtype=dtype) if copy or not isinstance(base, numpy.ndarray) or dtype and dtype != base.dtype else base
        self.__base.flags.writeable = False
        return self

    def __hash__(self):
        # Cheap hash: sample at most ~32 evenly spaced elements rather than
        # the whole array.
        return hash((self.__base.shape, self.__base.dtype, tuple(self.__base.flat[::self.__base.size//32+1]) if self.__base.size else ()))  # NOTE special case self.__base.size == 0 necessary for numpy<1.12

    @property
    def __nutils_hash__(self):
        # Content-addressed hash: sha1 over fully qualified type name, shape,
        # dtype and the raw array bytes.
        h = hashlib.sha1('{}.{}\0{} {}'.format(type(self).__module__, type(self).__qualname__, self.__base.shape, self.__base.dtype.str).encode())
        h.update(self.__base.tobytes())
        return h.digest()

    @property
    def __array_struct__(self):
        # Expose the array interface of the wrapped ndarray so numpy
        # functions accept a frozenarray directly.
        return self.__base.__array_struct__

    def __reduce__(self):
        # Pickle as (class, (data, dtype=None, copy=False)).
        return frozenarray, (self.__base, None, False)

    def __eq__(self, other):
        if self is other:
            return True
        if type(other) is not type(self):
            return False
        if self.__base is other.__base:
            return True
        if hash(self) != hash(other) or self.__base.dtype != other.__base.dtype or self.__base.shape != other.__base.shape or numpy.not_equal(self.__base, other.__base).any():
            return False
        # deduplicate: share the buffer so future comparisons short-circuit
        # on the identity check above.
        self.__base = other.__base
        return True

    # Total order: compare by dtype first (numpy dtype ordering), then shape,
    # then element values. Inequality with a non-frozenarray defers to the
    # other operand via NotImplemented.
    def __lt__(self, other):
        if not isinstance(other, frozenarray):
            return NotImplemented
        return self != other and (self.dtype < other.dtype
            or self.dtype == other.dtype and (self.shape < other.shape
            or self.shape == other.shape and self.__base.tolist() < other.__base.tolist()))

    def __le__(self, other):
        if not isinstance(other, frozenarray):
            return NotImplemented
        return self == other or (self.dtype < other.dtype
            or self.dtype == other.dtype and (self.shape < other.shape
            or self.shape == other.shape and self.__base.tolist() < other.__base.tolist()))

    def __gt__(self, other):
        if not isinstance(other, frozenarray):
            return NotImplemented
        return self != other and (self.dtype > other.dtype
            or self.dtype == other.dtype and (self.shape > other.shape
            or self.shape == other.shape and self.__base.tolist() > other.__base.tolist()))

    def __ge__(self, other):
        if not isinstance(other, frozenarray):
            return NotImplemented
        return self == other or (self.dtype > other.dtype
            or self.dtype == other.dtype and (self.shape > other.shape
            or self.shape == other.shape and self.__base.tolist() > other.__base.tolist()))

    def __getitem__(self, item):
        # Array-valued results are re-frozen; scalar results pass through.
        retval = self.__base.__getitem__(item)
        return frozenarray(retval, copy=False) if isinstance(retval, numpy.ndarray) else retval

    # Read-only views of the wrapped array's metadata.
    dtype = property(lambda self: self.__base.dtype)
    shape = property(lambda self: self.__base.shape)
    size = property(lambda self: self.__base.size)
    ndim = property(lambda self: self.__base.ndim)
    flat = property(lambda self: self.__base.flat)
    T = property(lambda self: frozenarray(self.__base.T, copy=False))

    # Arithmetic, conversions and reductions delegate to the wrapped ndarray;
    # their results are plain numpy objects (not frozen).
    __len__ = lambda self: self.__base.__len__()
    __repr__ = lambda self: 'frozenarray'+self.__base.__repr__()[5:]
    __str__ = lambda self: self.__base.__str__()
    __add__ = lambda self, other: self.__base.__add__(other)
    __radd__ = lambda self, other: self.__base.__radd__(other)
    __sub__ = lambda self, other: self.__base.__sub__(other)
    __rsub__ = lambda self, other: self.__base.__rsub__(other)
    __mul__ = lambda self, other: self.__base.__mul__(other)
    __rmul__ = lambda self, other: self.__base.__rmul__(other)
    __truediv__ = lambda self, other: self.__base.__truediv__(other)
    __rtruediv__ = lambda self, other: self.__base.__rtruediv__(other)
    __floordiv__ = lambda self, other: self.__base.__floordiv__(other)
    __rfloordiv__ = lambda self, other: self.__base.__rfloordiv__(other)
    __pow__ = lambda self, other: self.__base.__pow__(other)
    __int__ = lambda self: self.__base.__int__()
    __float__ = lambda self: self.__base.__float__()
    __abs__ = lambda self: self.__base.__abs__()
    __neg__ = lambda self: self.__base.__neg__()
    __invert__ = lambda self: self.__base.__invert__()
    __or__ = lambda self, other: self.__base.__or__(other)
    __ror__ = lambda self, other: self.__base.__ror__(other)
    __and__ = lambda self, other: self.__base.__and__(other)
    __rand__ = lambda self, other: self.__base.__rand__(other)
    __xor__ = lambda self, other: self.__base.__xor__(other)
    __rxor__ = lambda self, other: self.__base.__rxor__(other)
    tolist = lambda self, *args, **kwargs: self.__base.tolist(*args, **kwargs)
    copy = lambda self, *args, **kwargs: self.__base.copy(*args, **kwargs)
    astype = lambda self, *args, **kwargs: self.__base.astype(*args, **kwargs)
    take = lambda self, *args, **kwargs: self.__base.take(*args, **kwargs)
    any = lambda self, *args, **kwargs: self.__base.any(*args, **kwargs)
    all = lambda self, *args, **kwargs: self.__base.all(*args, **kwargs)
    sum = lambda self, *args, **kwargs: self.__base.sum(*args, **kwargs)
    min = lambda self, *args, **kwargs: self.__base.min(*args, **kwargs)
    max = lambda self, *args, **kwargs: self.__base.max(*args, **kwargs)
    prod = lambda self, *args, **kwargs: self.__base.prod(*args, **kwargs)
    dot = lambda self, *args, **kwargs: self.__base.dot(*args, **kwargs)
    argsort = lambda self, *args, **kwargs: self.__base.argsort(*args, **kwargs)

    # Shape-preserving transforms re-freeze their (array-valued) results.
    swapaxes = lambda self, *args, **kwargs: frozenarray(self.__base.swapaxes(*args, **kwargs), copy=False)
    ravel = lambda self, *args, **kwargs: frozenarray(self.__base.ravel(*args, **kwargs), copy=False)
    reshape = lambda self, *args, **kwargs: frozenarray(self.__base.reshape(*args, **kwargs), copy=False)
    transpose = lambda self, *args, **kwargs: frozenarray(self.__base.transpose(*args, **kwargs), copy=False)
    cumsum = lambda self, *args, **kwargs: frozenarray(self.__base.cumsum(*args, **kwargs), copy=False)
    nonzero = lambda self, *args, **kwargs: frozenarray(self.__base.nonzero(*args, **kwargs), copy=False)
class _c_arraymeta(type):
    '''Metaclass providing the ``c_array[dtype]`` notation: indexing yields a
    converter that turns array-like data into a ctypes array of that dtype.'''

    def __getitem__(self, dtype):
        label = 'c_array[{}]'.format(_getname(dtype))
        def converter(array):
            # Already a ctypes view: hand it back untouched.
            if isinstance(array, numpy.core._internal._ctypes):
                return array
            # Non-ndarray input is materialized with the requested dtype.
            if not isinstance(array, numpy.ndarray):
                array = numpy.array(array, dtype=dtype)
            if not array.flags.c_contiguous:
                raise ValueError('Array is not contiguous.')
            if array.dtype != dtype:
                raise ValueError('Expected dtype {} but array has dtype {}.'.format(dtype, array.dtype))
            return array.ctypes
        converter.__qualname__ = converter.__name__ = label
        return converter

    def __call__(*args, **kwargs):
        # ``c_array`` is a pure namespace: instantiation is forbidden.
        raise TypeError("cannot create an instance of class 'c_array'")
class c_array(metaclass=_c_arraymeta):
    '''
    Converts an array-like object to a ctypes array with a specific dtype. The
    function ``c_array[dtype](array)`` returns ``array`` unmodified if ``array``
    is already a ctypes array. If ``array`` is a :class:`numpy.ndarray`, the
    array is converted if the ``dtype`` is correct and the array is contiguous;
    otherwise :class:`ValueError` is raised. Otherwise, ``array`` is first
    converted to a contiguous :class:`numpy.ndarray` and then converted to a
    ctypes array. In the first two cases changes made to the ctypes array are
    reflected by the ``array`` argument: both are essentially views of the same
    data. In the third case, changes to either ``array`` or the returned ctypes
    array are not reflected by the other.

    Note that this class cannot be instantiated; it exists purely as a
    namespace for the ``c_array[dtype]`` indexing syntax supplied by its
    metaclass.
    '''
class attributes:
    '''
    Dictionary-like container exposing its keyword arguments as attributes
    instead of keys:

    >>> A = attributes(foo=10, bar=True)
    >>> A
    attributes(bar=True, foo=10)
    >>> A.foo
    10
    '''

    def __init__(self, **args):
        # Every keyword argument becomes an instance attribute.
        vars(self).update(args)

    def __repr__(self):
        # Attributes are rendered sorted by name for a deterministic repr.
        items = sorted(self.__dict__.items())
        body = ', '.join('{0[0]}={0[1]!r}'.format(item) for item in items)
        return 'attributes({})'.format(body)
class unit:
'''
Framework for physical units.
The unit class provides a basic framework for specifying values with | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
가지치기 기법(Pruning) 튜토리얼
=====================================
**저자**: `<NAME> <https://github.com/mickypaganini>`_
**번역** : `안상준 <https://github.com/Justin-A>`_
최첨단 딥러닝 모델들은 굉장히 많은 수의 파라미터값들로 구성되기 때문에, 쉽게 배포되기 어렵습니다.
이와 반대로, 생물학적 신경망들은 효율적으로 희소하게 연결된 것으로 알려져 있습니다.
모델의 정확도가 손상되지 않는 범위에서 메모리, 배터리, 하드웨어 소비량을 줄이고,
기기에 경량화된 모델을 배치하며, 개인이 이용하고 있는 기기에서 프라이버시가 보장되기 위해서는
모델에 포함된 파라미터 수를 줄여 압축하는 최적의 기법을 파악하는 것이 중요합니다.
연구 측면에서는, 가지치기 기법은 굉장히 많은 수의 파라미터값들로 구성된 모델과
굉장히 적은 수의 파라미터값들로 구성된 모델 간 학습 역학 차이를 조사하는데 주로 이용되기도 하며,
하위 신경망 모델과 파라미터값들의 초기화가 운이 좋게 잘 된 케이스를 바탕으로
("`lottery tickets <https://arxiv.org/abs/1803.03635>`_") 신경망 구조를 찾는 기술들에 대해 반대 의견을 제시하기도 합니다.
이번 튜토리얼에서는, ``torch.nn.utils.prune`` 을 이용하여 여러분이 설계한 딥러닝 모델에 대해 가지치기 기법을 적용해보는 것을 배워보고,
심화적으로 여러분의 맞춤형 가지치기 기법을 구현하는 방법에 대해 배워보도록 하겠습니다.
요구사항
------------
``"torch>=1.4"``
"""
import torch
from torch import nn
import torch.nn.utils.prune as prune
import torch.nn.functional as F
######################################################################
# 딥러닝 모델 생성
# -----------------------
# 이번 튜토리얼에서는, 얀 르쿤 교수님의 연구진들이 1998년도에 발표한 ``LeNet
# <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`` 의 모델 구조를 이용합니다.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LeNet(nn.Module):
    '''LeNet-5-style convolutional network (LeCun et al., 1998) producing
    10-class logits for single-channel images.'''

    def __init__(self):
        super().__init__()
        # One input channel -> six feature maps, 3x3 convolution kernels.
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # After the conv/pool stack the features are 16 channels of 5x5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        # Flatten everything except the batch dimension.
        out = out.view(-1, int(out.nelement() / out.shape[0]))
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
model = LeNet().to(device=device)

######################################################################
# Inspecting a module
# -------------------
#
# Inspect the (unpruned) ``conv1`` layer of the LeNet model. It contains two
# parameters, ``weight`` and ``bias``, and no buffers for now.
module = model.conv1
print(list(module.named_parameters()))

######################################################################
print(list(module.named_buffers()))

######################################################################
# Pruning a module
# ----------------
#
# To prune a module (here, the ``conv1`` layer of LeNet), first select a
# pruning technique from ``torch.nn.utils.prune`` (or implement your own by
# subclassing ``BasePruningMethod``). Then specify the module and the name of
# the parameter to prune within that module, and finally supply the keyword
# arguments the chosen technique requires. Here we randomly prune 30% of the
# values in the ``weight`` of ``conv1``: the module is the first argument,
# ``name`` identifies the parameter within the module by its string
# identifier, and ``amount`` gives either the fraction of connections to
# prune (a float between 0 and 1) or an absolute number of connections
# (a non-negative int).
prune.random_unstructured(module, name="weight", amount=0.3)

######################################################################
# Pruning acts by removing ``weight`` from the parameters and replacing it
# with a new parameter called ``weight_orig`` (i.e. the initial name with
# "_orig" appended), which stores the unpruned version of the tensor.
# ``bias`` was not pruned, so it remains intact.
print(list(module.named_parameters()))

######################################################################
# The pruning mask generated by the technique selected above is saved as a
# module buffer named ``weight_mask`` (i.e. the initial name with "_mask"
# appended).
print(list(module.named_buffers()))

######################################################################
# For the forward pass to work without modification, the ``weight`` attribute
# needs to exist. The pruning techniques in ``torch.nn.utils.prune`` compute
# the pruned version of the weight (combining the original weight with the
# mask) and store it in the attribute ``weight``. Note that the weight is now
# an attribute of the ``module``, no longer one of its parameters.
print(module.weight)

######################################################################
# Finally, pruning is applied prior to each forward pass using PyTorch's
# ``forward_pre_hooks``: when a module is pruned, each pruned parameter
# acquires such a hook. Since so far only the original parameter named
# ``weight`` has been pruned, only one hook is present.
print(module._forward_pre_hooks)

######################################################################
# For completeness, we can now prune the bias too, and observe how the
# module's parameters, buffers, hooks and attributes change. Just to try
# another pruning technique, ``l1_unstructured`` prunes the 3 entries of the
# bias with the smallest L1 norm.
prune.l1_unstructured(module, name="bias", amount=3)

######################################################################
# Based on what we saw before, the named parameters are expected to include
# both ``weight_orig`` and ``bias_orig``, the buffers to include
# ``weight_mask`` and ``bias_mask``, the two pruned tensors to exist as
# module attributes, and the module to have two ``forward_pre_hooks``.
print(list(module.named_parameters()))

######################################################################
print(list(module.named_buffers()))

######################################################################
print(module.bias)

######################################################################
print(module._forward_pre_hooks)
######################################################################
# Iterative pruning
# -----------------
#
# The same parameter in a module can be pruned multiple times, with the
# effect being the combination of the individual pruning steps applied in
# sequence. Combining the new mask with the old one is handled by the
# ``compute_mask`` method of ``PruningContainer``.
#
# Say, for example, that we now want to further prune ``module.weight``, this
# time using structured pruning along axis 0 of the tensor based on the
# channels' L2 norm (axis 0 corresponds to the output channels of the
# convolution). This is achieved with the ``ln_structured`` function, using
# ``n=2`` and ``dim=0``.
prune.ln_structured(module, name="weight", amount=0.5, n=2, dim=0)

############################################################################
# As we can verify, this zeroes out all the connections of 50% of the
# channels (3 out of 6) while preserving the action of the previous mask.
print(module.weight)

############################################################################
# The corresponding hook is now of type
# ``torch.nn.utils.prune.PruningContainer`` and stores the history of the
# pruning applied to the weight.
for hook in module._forward_pre_hooks.values():
    if hook._tensor_name == "weight":  # select the hook attached to the weight
        break

print(list(hook))  # pruning history inside the container

######################################################################
# Serializing a pruned model
# --------------------------
# All relevant tensors -- the mask buffers as well as the original parameters
# used to compute the pruned tensors -- are stored in the model's
# ``state_dict`` when needed, and can therefore easily be serialized and
# saved.
print(model.state_dict().keys())

######################################################################
# Removing the pruning re-parametrization
# ---------------------------------------
#
# To make the pruning permanent, remove the re-parametrization in terms of
# ``weight_orig`` and ``weight_mask``, together with the
# ``forward_pre_hook``, using the ``remove`` function from
# ``torch.nn.utils.prune``. Note that this does not undo the pruning, as if
# it had never happened: it simply makes it permanent by reassigning the
# pruned version of the weight to the model parameter ``weight``.

######################################################################
# State before removing the re-parametrization:
print(list(module.named_parameters()))

######################################################################
print(list(module.named_buffers()))

######################################################################
print(module.weight)

######################################################################
# State after removing the re-parametrization:
prune.remove(module, 'weight')
print(list(module.named_parameters()))

######################################################################
print(list(module.named_buffers()))

######################################################################
# Pruning multiple parameters in a model
# --------------------------------------
#
# By specifying the desired pruning technique and parameters, we can easily
# prune multiple tensors in a network, as shown in this example.
new_model = LeNet()
for name, module in new_model.named_modules():
    # prune 20% of the connections in all 2D convolutional layers
    if isinstance(module, torch.nn.Conv2d):
        prune.l1_unstructured(module, name='weight', amount=0.2)
    # prune 40% of the connections in all linear layers
    elif isinstance(module, torch.nn.Linear):
        prune.l1_unstructured(module, name='weight', amount=0.4)

print(dict(new_model.named_buffers()).keys())  # verify that all the masks exist
######################################################################
# Global pruning
# --------------
#
# So far we have only looked at what is usually called "local" pruning, i.e.
# pruning tensors in a model one by one, based solely on each entry's
# statistics (weight magnitude, activation, gradient, ...) within that
# tensor. A common and perhaps more powerful alternative is to prune the
# model all at once, e.g. by removing the lowest 20% of connections across
# the whole model, instead of removing the lowest 20% in each layer. This
# will likely result in different pruning percentages per layer. Let's see
# how to do that using ``global_unstructured`` from ``torch.nn.utils.prune``.
model = LeNet()

parameters_to_prune = (
    (model.conv1, 'weight'),
    (model.conv2, 'weight'),
    (model.fc1, 'weight'),
    (model.fc2, 'weight'),
    (model.fc3, 'weight'),
)

prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,
)

######################################################################
# Now we can check that the sparsity induced in every pruned parameter will
# not be equal to 20% in each layer; the global sparsity, however, will be
# (approximately) 20%.
print(
    "Sparsity in conv1.weight: {:.2f}%".format(
        100. * float(torch.sum(model.conv1.weight == 0))
        / float(model.conv1.weight.nelement())
    )
)
print(
    "Sparsity in conv2.weight: {:.2f}%".format(
        100. * float(torch.sum(model.conv2.weight == 0))
        / float(model.conv2.weight.nelement())
    )
)
print(
    "Sparsity in fc1.weight: {:.2f}%".format(
        100. * float(torch.sum(model.fc1.weight == 0))
        / float(model.fc1.weight.nelement())
    )
)
print(
    "Sparsity in fc2.weight: {:.2f}%".format(
        100. * float(torch.sum(model.fc2.weight == 0))
        / float(model.fc2.weight.nelement())
    )
)
print(
    "Sparsity in fc3.weight: {:.2f}%".format(
        100. * float(torch.sum(model.fc3.weight == 0))
        / float(model.fc3.weight.nelement())
    )
)
print(
    "Global sparsity: {:.2f}%".format(
        100. * float(
            torch.sum(model.conv1.weight == 0)
            + torch.sum(model.conv2.weight == 0)
            + torch.sum(model.fc1.weight == 0)
            + torch.sum(model.fc2.weight == 0)
            + torch.sum(model.fc3.weight == 0)
        )
        / float(
            model.conv1.weight.nelement()
            + model.conv2.weight.nelement()
            + model.fc1.weight.nelement()
            + model.fc2.weight.nelement()
            + model.fc3.weight.nelement()
        )
    )
)
######################################################################
# ``torch.nn.utils.prune`` 에서 확장된 맞춤형 가지치기 기법
# ------------------------------------------------------------------
# 맞춤형 가지치기 기법은, 다른 가지치기 기법을 적용하는 것과 같은 방식으로,
# ``BasePruningMethod`` 의 기본 클래스인 ``nn.utils.prune`` 모듈을 활용하여 구현할 수 있습니다.
# 기본 클래스는 ``__call__``, ``apply_mask``, ``apply``, ``prune``, ``remove`` 메소드들을 내포하고 있습니다.
# 특별한 케이스가 아닌 경우, 기본적으로 구성된 메소드들을 재구성할 필요가 없습니다.
# 그러나, ``__init__`` (구성요소), ``compute_mask``
# (가지치기 기법의 논리에 따라 주어진 텐서값에 마스크를 적용하는 방법) 을 고려하여 구성해야 합니다.
# 게다가, 가지치기 기법을 어떠한 방식으로 적용하는지 명확하게 구성해야 합니다.
# (지원되는 옵션은 ``global``, ``structured``, ``unstructured`` 입니다.)
# 이러한 방식은, 가지치기 기법을 반복적으로 적용해야 하는 경우 마스크를 결합하는 방법을 결정하기 | |
true if the left Variable default value is less than or equal to the
right Variable default value. False otherwise.
DOCTEST
=======
>>> w = Variable('x',5)
>>> x = Variable('x',5)
>>> y = Variable('y',3)
>>> z = Variable('z',5)
>>> w == x
True
>>> x == z
False
>>> x == y
False
>>> x > y
True
>>> x < w
False
>>> x <= z
True
>>> x >= y
True
'''
def __eq__(self, other):
    '''True when *other* is a Variable with the same name and the same
    default value; False for any other type.'''
    if type(self) != type(other):
        return False
    return self.default_val == other.default_val and self.var_name == other.var_name
def __ne__(self, other):
    # Exact boolean negation of __eq__ (explicit definition kept for
    # Python-2-style symmetry).
    return not self.__eq__(other)
def __le__(self, other):
    '''True when this Variable's default value is less than or equal to that
    of *other*; raises ValueError when *other* is of a different type.'''
    if type(self) != type(other):
        raise ValueError("Can't compare objects of {} and {}".format(type(self),type(other)))
    return self.default_val <= other.default_val
def __ge__(self, other):
    '''True when this Variable's default value is greater than or equal to
    that of *other*; raises ValueError when *other* is of a different type.'''
    if type(self) != type(other):
        raise ValueError("Can't compare objects of {} and {}".format(type(self),type(other)))
    return self.default_val >= other.default_val
def __lt__(self, other):
    '''True when this Variable's default value is strictly less than that of
    *other*; raises ValueError when *other* is of a different type.'''
    if type(self) != type(other):
        raise ValueError("Can't compare objects of {} and {}".format(type(self),type(other)))
    return self.default_val < other.default_val
def __gt__(self, other):
    '''True when this Variable's default value is strictly greater than that
    of *other*; raises ValueError when *other* is of a different type.'''
    if type(self) != type(other):
        raise ValueError("Can't compare objects of {} and {}".format(type(self),type(other)))
    return self.default_val > other.default_val
class VectorFunction(DiffObj):
    '''
    Vector-valued function for use in root_finder.py: wraps a list of scalar
    DiffObj functions so values and derivatives can be evaluated
    component-wise.
    '''

    def __init__(self, list_of_functions):
        self.list_of_functions = list_of_functions
        # Collect the union of variable names used by the components.
        name_list = []
        for f in self.list_of_functions:
            try:
                name_list = name_list + f.name_list
            except AttributeError:
                # The user added a plain constant to the vector: it has no
                # name_list and contributes no variables. (Narrowed from a
                # bare ``except`` so unrelated errors are not swallowed.)
                pass
        name_list = list(set(name_list))
        super(VectorFunction, self).__init__(name_list, None, None)

    def get_val(self, val_dict):
        '''Evaluate every component function at ``val_dict``; returns a list.'''
        return [f.get_val(val_dict) for f in self.list_of_functions]

    def get_der(self, val_dict, with_respect_to=None):
        '''Evaluate every component's derivative at ``val_dict``; returns a
        list (one entry per component function).'''
        return [f.get_der(val_dict, with_respect_to) for f in self.list_of_functions]

    def dict_list_to_array(self, dict_list):
        '''Convert a list of {variable name: value} dicts into a 2-D list,
        with rows ordered as ``dict_list`` and columns as ``self.name_list``.'''
        return [[f_dict[var] for var in self.name_list] for f_dict in dict_list]

    '''
    EQUALITY OPERATOR BEHAVIOR
    ==========================
    __eq__ Returns True if Vector objects have the same derivative and value at their respective default
    Variable values. Variables must also have the same names. False otherwise.
    __ne__ Returns the boolean negation of __eq__.
    __gt__ Returns true if the left Vector object value is greater than the right MathOps object value at their
    respective default Variable values in all dimensions. False otherwise.
    __lt__ Returns true if the left Vector object value is less than the right MathOps object value at their
    respective default Variable values in all dimensions. False otherwise.
    __ge__ Returns true if the left Vector object value is greater than or equal to the right MathOps object
    value at their respective default Variable values in all dimensions. False otherwise.
    __le__ Returns true if the left Vector object value is less than or equal to the right MathOps object value
    at their respective default Variable values in all dimensions. False otherwise.
    '''
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if len(self.list_of_functions) != len(other.list_of_functions):
            # Different dimensionality cannot be equal. (Previously this
            # case could raise IndexError during the element-wise loop.)
            return False
        for i, func in enumerate(self.list_of_functions):
            if not other.list_of_functions[i] == func:
                return False
        return True

    def __ne__(self, other):
        # Fixed to be the exact negation of __eq__, as the behavior note
        # above documents; previously a type mismatch raised ValueError here
        # even though ``==`` returned False for the same operands.
        return not self.__eq__(other)

    def __le__(self, other):
        if type(self) != type(other):
            raise ValueError("Can't compare objects of {} and {}".format(type(self), type(other)))
        # self <= other iff no component of self exceeds its counterpart.
        for i, func in enumerate(self.list_of_functions):
            if func > other.list_of_functions[i]:
                return False
        return True

    def __ge__(self, other):
        if type(self) != type(other):
            raise ValueError("Can't compare objects of {} and {}".format(type(self), type(other)))
        # self >= other iff no component of self is below its counterpart.
        for i, func in enumerate(self.list_of_functions):
            if func < other.list_of_functions[i]:
                return False
        return True

    def __lt__(self, other):
        if type(self) != type(other):
            raise ValueError("Can't compare objects of {} and {}".format(type(self), type(other)))
        # Strict: every component of self must be below its counterpart.
        for i, func in enumerate(self.list_of_functions):
            if func >= other.list_of_functions[i]:
                return False
        return True

    def __gt__(self, other):
        if type(self) != type(other):
            raise ValueError("Can't compare objects of {} and {}".format(type(self), type(other)))
        # Strict: every component of self must be above its counterpart.
        for i, func in enumerate(self.list_of_functions):
            if func <= other.list_of_functions[i]:
                return False
        return True
class MathOps(DiffObj):
'''
This class inherits from the DiffObj class. It implements non-elementary unary functions
including: sin, cos, tan, log, exp.
INSTANTIATION
===============
If a is of type DiffObj, then the invoking the constructor as follows will return an
object b of type MathOps:
b = MathOps.sin(a)
CLASS ATTRIBUTES
================
The attributes are not meant to be used by an end-user of our package, and they are meant for internal
computation.
name_list: A list of strings, where each item in the list represents the variables inside
the function represented by this DiffObj. E.g. for f(x,y) = x + y, the name_list
for a DiffObj representing f will be ['x', 'y'] (assuming the x.name_list = ['x']
and y.name_list = ['y'].
operator: A string, such as 'sin' or 'log', which represents one of the unary math operators
implemented by this class.
operand_list: A list of length 1 containing the DiffObj which the user has passed as an argument
to one of the classmethods of MathOps.
'''
def __init__(self, name_list, operator, operand):
    # Delegate storage of the variable names, the operator tag (e.g. 'sin')
    # and the single-element operand list to the DiffObj base class.
    super(MathOps, self).__init__(name_list,
            operator, operand)
@classmethod
def getUnaryOperator(cls, operator, obj):
    '''Wrap ``obj`` in a new MathOps node tagged with ``operator``.

    INPUT
    =====
    operator: A string such as 'sin' or 'log' naming the unary function.
    obj: The DiffObj operand.

    OUTPUT
    ======
    A MathOps instance representing ``operator`` applied to ``obj``.

    Raises TypeError when ``obj`` does not look like a DiffObj (i.e. it has
    no ``name_list`` attribute).
    '''
    try:
        name_list = obj.name_list
    except AttributeError as err:
        # Narrowed from a bare ``except``: only a missing ``name_list``
        # signals a non-DiffObj argument. Previously any error raised while
        # constructing the node (a genuine bug) was masked as this TypeError.
        raise TypeError('Only objects of type DiffObj are permitted.') from err
    # Construct outside the try block so unrelated failures propagate as-is.
    return MathOps(name_list, operator, [obj])
@classmethod
def sin(cls, obj):
    '''
    INPUT
    =====
    obj: An object of type DiffObj, on which the user wants to
    apply the sine function.

    OUTPUT
    ======
    result: A DiffObj, whose operator is 'sin' and whose operand is
    the DiffObj on which the user had called this sin function.

    DOCTEST
    =======
    >>> z=MathOps.sin(x)
    >>> z.get_val({'x':math.pi})
    1.2246467991473532e-16
    >>> z.get_der({'x':math.pi})
    {'x': -1.0}
    '''
    # Delegates node construction (and argument validation) to
    # getUnaryOperator with the 'sin' operator tag.
    return MathOps.getUnaryOperator('sin', obj)
@classmethod
def cos(cls, obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the cos function.
OUTPUT
======
result: A DiffObj, whose operator is 'cos' and whose operand is
the DiffObj on which the user had called this cos function.
DOCTEST
======
>>> z=MathOps.cos(x)
>>> z.get_val({'x':math.pi})
-1.0
>>> z.get_der({'x':math.pi})
{'x': -1.2246467991473532e-16}
'''
return MathOps.getUnaryOperator('cos', obj)
@classmethod
def tan(cls,obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the tan function.
OUTPUT
======
result: A DiffObj, whose operator is 'tan' and whose operand is
the DiffObj on which the user had called this tan function.
DOCTEST
======
>>> z=MathOps.tan(x)
>>> z.get_val({'x':0})
0.0
>>> z.get_der({'x':0})
{'x': 1.0}
'''
return MathOps.getUnaryOperator('tan', obj)
@classmethod
def arcsin(cls,obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the arcsin function.
OUTPUT
======
result: A DiffObj, whose operator is 'arcsin' and whose operand is
the DiffObj on which the user had called this arcsin function.
DOCTEST
======
>>> z=MathOps.arcsin(x)
>>> z.get_val({'x':0})
0.0
>>> z.get_der({'x':0})
{'x': 1.0}
'''
return MathOps.getUnaryOperator('arcsin', obj)
@classmethod
def arccos(cls,obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the arccos function.
OUTPUT
======
result: A DiffObj, whose operator is 'arccos' and whose operand is
the DiffObj on which the user had called this arccos function.
DOCTEST
======
>>> z=MathOps.arccos(x)
>>> z.get_val({'x':0})
math.pi/2
>>> z.get_der({'x':0})
{'x': -1.0}
'''
return MathOps.getUnaryOperator('arccos', obj)
@classmethod
def arctan(cls,obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the arctan function.
OUTPUT
======
result: A DiffObj, whose operator is 'arctan' and whose operand is
the DiffObj on which the user had called this arctan function.
DOCTEST
======
>>> z=MathOps.arctan(x)
>>> z.get_val({'x':0.0})
0.0
>>> z.get_der({'x':0.0})
{'x': 1.0}
'''
return MathOps.getUnaryOperator('arctan', obj)
@classmethod
def sinh(cls, obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the hyperbolic sine function.
OUTPUT
======
result: A DiffObj, whose operator is 'hsin' and whose operand is
the DiffObj on which the user had called this hyperbolic sin function.
DOCTEST
======
>>> z=MathOps.hsin(x)
>>> z.get_val({'x':math.pi})
1.2246467991473532e-16
>>> z.get_der({'x':math.pi})
{'x': -1.0}
'''
return MathOps.getUnaryOperator('sinh', obj)
@classmethod
def cosh(cls, obj):
'''
INPUT
=====
obj: An object of type DiffObj, on which the user wants to
apply the hyperbolic cos function.
OUTPUT
======
result: A DiffObj, whose operator is 'hcos' and whose operand is
the DiffObj on which the user had called this hyperbolic cos function.
DOCTEST
======
>>> z=MathOps.cos(x)
>>> z.get_val({'x':math.pi})
-1.0
>>> z.get_der({'x':math.pi})
{'x': -1.2246467991473532e-16}
'''
return MathOps.getUnaryOperator('cosh', obj)
@classmethod
def tanh(cls,obj):
'''
INPUT
=====
obj: An object | |
# coding=utf-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow-transform CsvCoder tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
import tensorflow as tf
from tensorflow_transform.coders import csv_coder
from tensorflow_transform.tf_metadata import dataset_schema
import unittest
class TestCSVCoder(unittest.TestCase):
_COLUMNS = ['numeric1', 'text1', 'category1', 'idx', 'numeric2', 'value',
'boolean1']
# The following input schema has no default values, so any invocations to
# decode with missing values should raise an error. CsvCoderDecodeTest adds
# good coverage for missing value handling.
_INPUT_SCHEMA = dataset_schema.from_feature_spec({
'numeric1': tf.FixedLenFeature(shape=[], dtype=tf.int64),
'numeric2': tf.VarLenFeature(dtype=tf.float32),
'boolean1': tf.FixedLenFeature(shape=[1], dtype=tf.bool),
'text1': tf.FixedLenFeature(shape=[], dtype=tf.string),
'category1': tf.VarLenFeature(dtype=tf.string),
'y': tf.SparseFeature('idx', 'value', tf.float32, 10),
})
_ENCODE_DECODE_CASES = [
# FixedLenFeature scalar int.
('12', 12, False,
tf.FixedLenFeature(shape=[], dtype=tf.int64)),
# FixedLenFeature scalar float without decimal point.
('12', 12, False,
tf.FixedLenFeature(shape=[], dtype=tf.float32)),
# FixedLenFeature scalar boolean.
('True', True, False,
tf.FixedLenFeature(shape=[], dtype=tf.bool)),
# FixedLenFeature scalar boolean.
('False', False, False,
tf.FixedLenFeature(shape=[], dtype=tf.bool)),
# FixedLenFeature length 1 vector int.
('12', [12], False,
tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
# FixedLenFeature size 1 matrix int.
('12', [[12]], False,
tf.FixedLenFeature(shape=[1, 1], dtype=tf.int64)),
# FixedLenFeature unquoted text.
('this is unquoted text', 'this is unquoted text', False,
tf.FixedLenFeature(shape=[], dtype=tf.string)),
# FixedLenFeature quoted text.
('"this is a ,text"', 'this is a ,text', False,
tf.FixedLenFeature(shape=[], dtype=tf.string)),
# FixedLenFeature scalar numeric with default value.
('4', 4, False,
tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)),
# FixedLenFeature scalar text with default value set.
('a test', 'a test', False,
tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='d')),
# VarLenFeature text.
('a test', ['a test'], False,
tf.VarLenFeature(dtype=tf.string)),
# SparseFeature float one value.
('5,2.0', ([5], [2.0]), False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# SparseFeature float no values.
(',', ([], []), False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# FixedLenFeature scalar int, multivalent.
('12', 12, True,
tf.FixedLenFeature(shape=[], dtype=tf.int64)),
# FixedLenFeature length 1 vector int, multivalent.
('12', [12], True,
tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
# FixedLenFeature length 2 vector int, multivalent.
('12|14', [12, 14], True,
tf.FixedLenFeature(shape=[2], dtype=tf.int64)),
# FixedLenFeature size 1 matrix int.
('12', [[12]], True,
tf.FixedLenFeature(shape=[1, 1], dtype=tf.int64)),
# FixedLenFeature size (2, 2) matrix int.
('12|13|14|15', [[12, 13], [14, 15]], True,
tf.FixedLenFeature(shape=[2, 2], dtype=tf.int64)),
]
_DECODE_ERROR_CASES = [
# FixedLenFeature scalar numeric missing value.
('', ValueError, r'expected a value on column \'x\'', False,
tf.FixedLenFeature(shape=[], dtype=tf.int64)),
# FixedLenFeature length 1 vector numeric missing value.
('', ValueError, r'expected a value on column \'x\'', False,
tf.FixedLenFeature(shape=[1], dtype=tf.int64)),
# FixedLenFeature length >1 vector.
('1', ValueError,
r'FixedLenFeature \'x\' was not multivalent', False,
tf.FixedLenFeature(shape=[2], dtype=tf.int64)),
# FixedLenFeature scalar text missing value.
('', ValueError, r'expected a value on column \'x\'', False,
tf.FixedLenFeature(shape=[], dtype=tf.string)),
# SparseFeature with missing value but present index.
('5,', ValueError,
r'SparseFeature \'x\' has indices and values of different lengths',
False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# SparseFeature with missing index but present value.
(',2.0', ValueError,
r'SparseFeature \'x\' has indices and values of different lengths',
False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# SparseFeature with negative index.
('-1,2.0', ValueError, r'has index -1 out of range', False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# SparseFeature with index equal to size.
('10,2.0', ValueError, r'has index 10 out of range', False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# SparseFeature with index greater than size.
('11,2.0', ValueError, r'has index 11 out of range', False,
tf.SparseFeature('idx', 'value', tf.float32, 10)),
# FixedLenFeature with text missing value.
('test', ValueError, r'could not convert string to float: test', False,
tf.FixedLenFeature(shape=[], dtype=tf.float32)),
# FixedLenFeature scalar int, multivalent, too many values.
('1|2', ValueError,
r'FixedLenFeature \'x\' got wrong number of values', True,
tf.FixedLenFeature(shape=[], dtype=tf.float32)),
# FixedLenFeature length 1 int, multivalent, too many values.
('1|2', ValueError,
r'FixedLenFeature \'x\' got wrong number of values', True,
tf.FixedLenFeature(shape=[1], dtype=tf.float32)),
# FixedLenFeature length 2 int, multivalent, too few values.
('1', ValueError,
r'FixedLenFeature \'x\' got wrong number of values', True,
tf.FixedLenFeature(shape=[2], dtype=tf.float32)),
]
_ENCODE_ERROR_CASES = [
# FixedLenFeature length 2 vector, multivalent with wrong number of
# values.
([1, 2, 3], ValueError,
r'FixedLenFeature \'x\' got wrong number of values', True,
tf.FixedLenFeature(shape=[2], dtype=tf.string))
]
_DECODE_ONLY_CASES = [
# FixedLenFeature scalar float with decimal point.
('12.0', 12, False,
tf.FixedLenFeature(shape=[], dtype=tf.float32)),
# FixedLenFeature scalar float with quoted value.
('"12.0"', 12, False,
tf.FixedLenFeature(shape=[], dtype=tf.float32)),
# FixedLenFeature scalar numeric with missing value and default value set.
('', -1, False,
tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=-1)),
# FixedLenFeature scalar text with missing value and default value set.
('', 'd', False,
tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='d')),
# FixedLenFeature scalar numeric with missing value and default value set,
# where default value is falsy.
('', 0, False,
tf.FixedLenFeature(shape=[], dtype=tf.int64, default_value=0)),
# FixedLenFeature scalar text with missing value and default value set,
# where default value is falsy.
('', '', False,
tf.FixedLenFeature(shape=[], dtype=tf.string, default_value='')),
# VarLenFeature text with missing value.
('', [], False,
tf.VarLenFeature(dtype=tf.string)),
# FixedLenFeature scalar text with default value set.
('', True, False,
tf.FixedLenFeature(shape=[], dtype=tf.bool, default_value=True)),
]
longMessage = True
def _msg_for_decode_case(self, csv_line, feature_spec):
return 'While decoding "{csv_line}" with FeatureSpec {feature_spec}'.format(
csv_line=csv_line, feature_spec=feature_spec)
def _msg_for_encode_case(self, value, feature_spec):
return 'While encoding {value} with FeatureSpec {feature_spec}'.format(
value=value, feature_spec=feature_spec)
def _assert_encode_decode(self, coder, data, expected_decoded):
decoded = coder.decode(data)
np.testing.assert_equal(decoded, expected_decoded)
encoded = coder.encode(decoded)
np.testing.assert_equal(encoded, data.encode('utf-8'))
decoded_again = coder.decode(encoded)
np.testing.assert_equal(decoded_again, expected_decoded)
  def test_csv_coder(self):
    """Whole-schema round trip, with expected values given both as plain
    Python objects and as NumPy equivalents."""
    data = '12,"this is a ,text",categorical_value,1,89.0,12.0,False'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # Python types.
    expected_decoded = {'category1': ['categorical_value'],
                        'numeric1': 12,
                        'numeric2': [89.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1], [12.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
    # Numpy types.
    expected_decoded = {'category1': np.array(['categorical_value']),
                        'numeric1': np.array(12),
                        'numeric2': np.array([89.0]),
                        'boolean1': np.array([False]),
                        'text1': np.array(['this is a ,text']),
                        'y': (np.array(1), np.array([12.0]))}
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_csv_coder_with_unicode(self):
    """Whole-schema round trip where the categorical column holds non-ASCII
    text; decoded values are expected as UTF-8 encoded bytes."""
    data = u'12,"this is a ,text",שקרכלשהו,1,89.0,12.0,False'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA)
    # Python types.
    expected_decoded = {
        'category1': [u'שקרכלשהו'.encode('utf-8')],
        'numeric1': 12,
        'numeric2': [89.0],
        'boolean1': [False],
        'text1': 'this is a ,text',
        'y': ([1], [12.0])
    }
    self._assert_encode_decode(coder, data, expected_decoded)
    # Numpy types.
    expected_decoded = {
        'category1': np.array([u'שקרכלשהו'.encode('utf-8')]),
        'numeric1': np.array(12),
        'numeric2': np.array([89.0]),
        'boolean1': np.array([False]),
        'text1': np.array(['this is a ,text']),
        'y': (np.array(1), np.array([12.0]))
    }
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_tsv_coder(self):
    """Whole-schema round trip using tab as the primary delimiter."""
    data = '12\t"this is a \ttext"\tcategorical_value\t1\t89.0\t12.0\tTrue'
    coder = csv_coder.CsvCoder(self._COLUMNS, self._INPUT_SCHEMA,
                               delimiter='\t')
    expected_decoded = {'category1': ['categorical_value'],
                        'numeric1': 12,
                        'numeric2': [89.0],
                        'boolean1': [True],
                        'text1': 'this is a \ttext',
                        'y': ([1], [12.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
  def test_valency(self):
    """Round trip with multivalent columns split on a secondary delimiter
    ('|'); 'numeric1' is widened to a length-2 vector for this test."""
    data = ('11|12,"this is a ,text",categorical_value|other_value,1|3,89.0|'
            '91.0,12.0|15.0,False')
    feature_spec = self._INPUT_SCHEMA.as_feature_spec().copy()
    feature_spec['numeric1'] = tf.FixedLenFeature(shape=[2], dtype=tf.int64)
    schema = dataset_schema.from_feature_spec(feature_spec)
    multivalent_columns = ['numeric1', 'numeric2', 'y']
    coder = csv_coder.CsvCoder(self._COLUMNS, schema,
                               delimiter=',', secondary_delimiter='|',
                               multivalent_columns=multivalent_columns)
    # 'category1' is not multivalent, so its '|' stays inside the value.
    expected_decoded = {'category1': ['categorical_value|other_value'],
                        'numeric1': [11, 12],
                        'numeric2': [89.0, 91.0],
                        'boolean1': [False],
                        'text1': 'this is a ,text',
                        'y': ([1, 3], [12.0, 15.0])}
    self._assert_encode_decode(coder, data, expected_decoded)
# Test successful decoding with a single column.
def testDecode(self):
for csv_line, value, multivalent, feature_spec in (
self._ENCODE_DECODE_CASES + self._DECODE_ONLY_CASES):
schema = dataset_schema.from_feature_spec({'x': feature_spec})
if isinstance(feature_spec, tf.SparseFeature):
columns = [feature_spec.index_key, feature_spec.value_key]
else:
columns = 'x'
if multivalent:
coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
multivalent_columns=columns)
else:
coder = csv_coder.CsvCoder(columns, schema)
np.testing.assert_equal(coder.decode(csv_line), {'x': value},
self._msg_for_decode_case(csv_line, feature_spec))
# Test decode errors with a single column.
def testDecodeErrors(self):
for csv_line, error_type, error_msg, multivalent, feature_spec in (
self._DECODE_ERROR_CASES):
schema = dataset_schema.from_feature_spec({'x': feature_spec})
if isinstance(feature_spec, tf.SparseFeature):
columns = [feature_spec.index_key, feature_spec.value_key]
else:
columns = 'x'
with self.assertRaisesRegexp(
error_type, error_msg,
msg=self._msg_for_decode_case(csv_line, feature_spec)):
# We don't distinguish between errors in the coder constructor and in
# the decode method.
if multivalent:
coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
multivalent_columns=columns)
else:
coder = csv_coder.CsvCoder(columns, schema)
coder.decode(csv_line)
# Test successful encoding with a single column.
def testEncode(self):
for csv_line, value, multivalent, feature_spec in self._ENCODE_DECODE_CASES:
schema = dataset_schema.from_feature_spec({'x': feature_spec})
if isinstance(feature_spec, tf.SparseFeature):
columns = [feature_spec.index_key, feature_spec.value_key]
else:
columns = 'x'
if multivalent:
coder = csv_coder.CsvCoder(columns, schema, secondary_delimiter='|',
multivalent_columns=columns)
else:
coder = csv_coder.CsvCoder(columns, schema)
self.assertEqual(coder.encode({'x': value}), csv_line,
msg=self._msg_for_encode_case(value, feature_spec))
# Test successful encoding with a single column.
def testEncodeErrors(self):
for value, error_type, error_msg, multivalent, feature_spec in (
self._ENCODE_ERROR_CASES):
schema = dataset_schema.from_feature_spec({'x': feature_spec})
if isinstance(feature_spec, tf.SparseFeature):
| |
import math
import time
import sys
from PyQt4 import QtGui, QtCore
import serial
import new_era_vp as new_era
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import pdb
# ToDo | 1. Add email address for sending email when done (only works if
# ToDo | connected to the internet of course)
# ToDo | 2. Check how to properly wipe/flush the pump's memory so it doesn't do
# ToDo | any ghost pumping on startup. Use *RESET? See new_era_vp.py
# ToDo |--> Read manual (10.4.5 and 12.1) and see if there is a way to reset
# ToDo | the memory so that manual usage of the pump can be done.
# ToDo |
# ToDo | Issue: What happens if the pump hits the wall, i.e. if user puts in
# ToDo | 1 ml syringe that is pumped to 0.5 ml and enters 0.6 ml sample and
# ToDo | screws the blocker to 0.5 ml? See General Comments §2
# ToDo |
# ToDo | Future:
# ToDo | 1. If there are two pumps, make sure that the second pump has the
# ToDo | opposite pump direction as the first
# ToDo |
# ToDo | General comments:
# ToDo | 1. Left the prime functionality commented but intact. Maybe we want
# ToDo | to save it for future use
# ToDo | 2. Question: Does the stall detection work when Python controls the
# ToDo | pump? Check the full manual, search the internet
# ToDo |
# Shared run-state flags. (The previous module-level `global PAUSED, INF`
# statement was a no-op and has been removed; the PumpControl methods that
# mutate these declare `global` themselves.)
PAUSED = True   # True while image-triggered pumping is suspended
INF = True      # True -> infuse (positive rates); False -> withdraw

# Syringe inner diameters in mm, keyed by '<volume> ml BD'.
syringes = {'1 ml BD': '4.699',
            '3 ml BD': '8.585',
            '5 ml BD': '11.99',
            '10 ml BD': '14.43',
            '20 ml BD': '19.05',
            '30 ml BD': '21.59',
            '60 ml BD': '26.59'}

# Full syringe volumes in ul, derived from the leading '<volume> ml' token.
syr_volumes = {key: float(key.split()[0]) * 1000.0  # ml --> ul
               for key in syringes}

# Rates for NE1010, see: www.syringepump.com/download/index.html
# www.syringepump.com/download/NE-1010Brochure.pdf
# www.syringepump.com/download/NE-1010-510-511%20Rates%20and%20Specifications.pdf
# Max rates translated from mL/hr to ul/hr (i.e. factor 10**3 increase)
max_rates = {'1 ml BD': 191100.0,
             '3 ml BD': 637900.0,
             '5 ml BD': 1244000.0,
             '10 ml BD': 1802000.0,
             '20 ml BD': 3141000.0,
             '30 ml BD': 4035000.0,
             '60 ml BD': 6120000.0}
# Min rates in ul/hr
min_rates = {'1 ml BD': 1.495,
             '3 ml BD': 4.868,
             '5 ml BD': 9.495,
             '10 ml BD': 13.76,
             '20 ml BD': 23.97,
             '30 ml BD': 30.79,
             '60 ml BD': 46.70}

dt_sec = 0.5  # 0.5 seconds pump time from start to stop
dt_hr = dt_sec/3600.0  # dt_sec in hours
# Creates a class with a signal method
class FileCreateSignal(QtCore.QObject):
    """QObject wrapper exposing a Qt signal fired on new-file events.

    pyqtSignal must live on a QObject subclass, which is why this tiny
    class exists at all.
    """
    create_signal = QtCore.pyqtSignal()
# This class defines AddedFileWatcher, a modified PatternMatchingEventHandler
# 1. Monitor files that end in file names defined by list 'patterns'
# 2. When a file matching the pattern is created, on_created is called and
#    is set up to send a signal.
class AddedFileWatcher(PatternMatchingEventHandler):
    def __init__(self, patterns, ignore_directories=True):
        """patterns: list of glob patterns (e.g. ['*.png']).
        ignore_directories: skip directory events (default True)."""
        super().__init__()
        # NOTE(review): this assigns watchdog's private attributes directly
        # instead of passing patterns/ignore_directories to
        # super().__init__(); it works with the watchdog version in use,
        # but confirm before upgrading that dependency.
        self._ignore_directories = ignore_directories
        self._patterns = patterns
        self.fcs = FileCreateSignal()  # Qt signal proxy for the GUI thread
    def on_created(self, event):
        """Emit the Qt signal for a newly created matching file, unless the
        controller is paused."""
        # Simplified from `if PAUSED: pass / elif not PAUSED: ...`; PAUSED
        # is module-level state toggled by the GUI start/stop handlers, and
        # a read-only access needs no `global` declaration.
        if not PAUSED:
            self.fcs.create_signal.emit()
class PumpControl(QtGui.QWidget):
    # NOTE(review): a `global` statement at class-body level has no effect;
    # the methods that actually mutate PAUSED declare it themselves.
    global PAUSED
    def __init__(self):
        """Build the pump-control widget: a watchdog Observer plus a
        file-watcher whose Qt signal triggers one pump shot per new image."""
        super(PumpControl, self).__init__()
        # Create new instance of observer
        self.obs = Observer()
        # Create new instance of filewatcher
        self.new_file_watch = AddedFileWatcher(patterns=['*.png', '*.jpg'])
        # Connect to pump_vol
        self.new_file_watch.fcs.create_signal.connect(self.pump_vol)
        self.initUI()
    def initUI(self):
        """Open the serial link to the pump(s) and lay out the GUI.

        Builds one grid row per detected pump (syringe selector, sample
        volume, volume-per-image, current volume), wires the start/stop and
        directory buttons, and zeroes all flow rates.
        """
        # NOTE(review): `serial_port` is a module-level name defined outside
        # this excerpt — confirm it is set before PumpControl is created.
        self.ser = serial.Serial(serial_port, 19200, timeout=.1)
        print('Connected to', self.ser.name)
        # See if it works
        # new_era.reset_all(self.ser)
        # set grid layout
        grid = QtGui.QGridLayout()
        grid.setSpacing(5)
        # Run Button
        self.runbtn = QtGui.QPushButton('Start Image Detection', self)
        grid.addWidget(self.runbtn, 1, 2)
        self.runbtn.setCheckable(True)
        self.runbtn.setChecked(0)
        self.runbtn.clicked.connect(self.start_detection)
        # Stop button
        self.stopbtn = QtGui.QPushButton('Stop', self)
        grid.addWidget(self.stopbtn, 1, 3)
        self.stopbtn.setCheckable(True)
        self.stopbtn.setChecked(0)
        self.stopbtn.clicked.connect(self.stop_detection)
        # K Row 1.1: Add button to select user directory where images from
        # microscope will be stored
        self.dirbutton = QtGui.QPushButton('Image Directory', self)
        grid.addWidget(self.dirbutton, 1, 1)
        self.image_dir = ''
        # self.image_dir = 'C:\\Users\\admin' # Standard?
        # Test this folder later: 'D:\\test_folder_programming\\add_files'
        self.dirbutton.clicked.connect(self.set_dir)
        # Column labels
        grid.addWidget(QtGui.QLabel('Pump number'), 2, 0)
        grid.addWidget(QtGui.QLabel('Syringe'), 2, 1)
        grid.addWidget(QtGui.QLabel('Sample volume (ul)'), 2, 2)
        grid.addWidget(QtGui.QLabel('Volume/pump (ul)'), 2, 3)
        grid.addWidget(QtGui.QLabel('Current vol (ul)'), 2, 4)
        # find pumps
        pumps = new_era.find_pumps(self.ser)
        # iterate over pumps, adding a row for each
        self.mapper = QtCore.QSignalMapper(self)
        # self.primemapper = QtCore.QSignalMapper(self) # No priming
        self.currvol = dict()
        # self.currflow = dict()
        self.sample_vol = dict()
        self.syr_volume = dict()
        self.syr_max_rate = dict()
        self.syr_min_rate = dict()
        self.volumes = dict()
        self.rates = dict()
        # self.prime_btns = dict() # No priming
        # For future: only allow one or two pumps, no more. Second pump must
        # have same rate but with reversed direction to the first one
        for i, pump in enumerate(pumps):
            row = 3+i
            # add pump number
            pumplab = QtGui.QLabel('Pump %i' % pump)
            pumplab.setAlignment(QtCore.Qt.AlignHCenter)
            grid.addWidget(pumplab, row, 0)
            # add syringe pulldown
            combo = QtGui.QComboBox(self)
            [combo.addItem(s) for s in sorted(syringes)]
            self.mapper.setMapping(combo, pump)
            combo.activated.connect(self.mapper.map)
            grid.addWidget(combo, row, 1)
            self.syr_volume[pump] = 0.0 # Syringe volume
            self.syr_max_rate[pump] = 0.0 # max rate in ul/hr
            self.syr_min_rate[pump] = 0.0 # min rate in ul/hr
            # Textbox to set sample volume
            self.sample_vol[pump] = QtGui.QLineEdit(self)
            self.sample_vol[pump].setText('0')
            grid.addWidget(self.sample_vol[pump], row, 2)
            # Textbox to add volume to pump
            self.volumes[pump] = QtGui.QLineEdit(self)
            self.volumes[pump].setText('0')
            grid.addWidget(self.volumes[pump], row, 3)
            # The unit for the rate is ul/hr while the user enters the volume
            # pumped per image in ul (and the pump is pumping for dt_sec sec)
            self.rates[pump] = str(
                float(self.volumes[pump].text().split()[0]) / dt_hr) + ' ul/h'
            # add label to show current volume pumped
            self.currvol[pump] = QtGui.QLabel(self)
            self.currvol[pump].setAlignment(QtCore.Qt.AlignHCenter)
            grid.addWidget(self.currvol[pump], row, 4)
            # No priming
            # No prime button! # add prime button
            # btn = QtGui.QPushButton('Prime', self)
            # btn.setCheckable(True) # makes the button toggleable
            # self.primemapper.setMapping(btn, pump)
            # btn.clicked.connect(self.primemapper.map)
            # grid.addWidget(btn, row, 5)
            # self.prime_btns[pump] = btn
        # mapper thing
        self.mapper.mapped.connect(self.update_syringe)
        # No priming
        # self.primemapper.mapped.connect(self.prime_pumps)
        # Pump count status bar
        self.pump_counter = 0
        self.pump_count = QtGui.QLabel(self)
        grid.addWidget(self.pump_count, 0, 2)
        self.pump_count.setText('Pump count: {}'.format(self.pump_counter))
        # Volume infused
        self.pumped_volume = 0
        self.pump_volume = QtGui.QLabel(self)
        grid.addWidget(self.pump_volume, 0, 3)
        self.pump_volume.setText('Vol INF: {} (ul)'.format(self.pumped_volume))
        # set up the status bar
        self.curr_state = 'Stopped'+'; {}'.format('INF' if INF else 'WDR')
        self.statusbar = QtGui.QLabel(self)
        grid.addWidget(self.statusbar, 1, 4)
        self.statusbar.setText('Status: '+self.curr_state)
        # set up the last command bar
        self.commandbar = QtGui.QLabel(self)
        grid.addWidget(self.commandbar, row+1, 0, 1, 4)
        # No priming
        # make the prime state: a set containing the priming pumps
        # self.prime_state = set()
        # initialize: set all flow rates to zero
        self.run_update(self.rates)
        self.stop_all()
        [self.update_syringe(p) for p in pumps]
        self.commandbar.setText('')
        # format the page
        self.setLayout(grid)
        self.setWindowTitle('Pump control')
        self.show()
# K get user directory
def set_dir(self):
dialog = QtGui.QFileDialog()
dir_path = dialog.getExistingDirectory(None, 'Select microscope image'
' folder')
self.image_dir = dir_path
    def update_rates(self):
        """Validate the per-pump text fields, clamp rates to the syringe's
        limits, and push the resulting ul/hr rates to the pump(s).

        Invalid input (non-positive volumes, or a sample volume larger than
        the selected syringe) zeroes that pump's rate, stops everything and
        sets PAUSED.
        """
        global INF, PAUSED
        # Just to be sure
        self.stop_all()
        # Calculate new rates from self.volumes and send to pump(s)
        rates = {}
        for pump in self.rates:
            # check if pump volumes and sample volumes are floats and > 0
            # NOTE(review): is_positive_float is defined elsewhere in this
            # file (outside this excerpt).
            if is_positive_float(self.volumes[pump].text().split()[0]) \
                    and \
                    is_positive_float(self.sample_vol[pump].text().split()[0]):
                # Check if sample volume > syringe max volume; Must update
                # syringe volume before checking
                if float(self.sample_vol[pump].text().split()[0]) > \
                        self.syr_volume[pump]:
                    self.sample_vol[pump].setText('0.0')
                    print('Sample volume larger than syringe volume!')
                    self.runbtn.setChecked(0)
                    self.stopbtn.setChecked(0)
                    self.stop_all()
                    PAUSED = True
                else: # All good
                    rate_phr = float(self.volumes[pump].text().split()[0]) / \
                        dt_hr # Translate volume to ul/hr
                    # Check if rate is below min/above max rate
                    if rate_phr > self.syr_max_rate[pump]:
                        rate_phr = self.syr_max_rate[pump]
                        print('Pump volume reset to max')
                    elif rate_phr < self.syr_min_rate[pump]:
                        rate_phr = self.syr_min_rate[pump]
                        print('Pump volume reset to min')
                    self.volumes[pump].setText('{:.2f} ul'.format(
                        rate_phr*dt_hr))
                    self.currvol[pump].setText('{:.2f} ul'.format(
                        rate_phr*dt_hr))
                    # if INF: # if infusing, rate_phr > 0
                    #     rate_phr = math.copysign(rate_phr, 1)
                    # elif not INF: # if withdrawing, rate_phr < 0
                    #     rate_phr = math.copysign(rate_phr, -1)
                    # Sign encodes direction: positive infuses, negative
                    # withdraws.
                    rate = (rate_phr if INF else -rate_phr)
                    self.rates[pump] = str(rate) + ' ul/h'
                    rates[pump] = str(self.rates[pump]).split()[0].strip()
            # else set to zero
            else:
                rates[pump] = '0.0'
                self.volumes[pump].setText('0.0')
                self.rates[pump] = '0.0 ul/h'
                # HERE CREATE POPUP
                print('Volumes > 0 required for both sample and pump per '
                      'image!')
                self.curr_state = 'Stopped' + '; {}'.format(
                    'INF' if INF else 'WDR')
                self.statusbar.setText('Status: ' + self.curr_state)
                self.runbtn.setChecked(0)
                self.stopbtn.setChecked(0)
                self.stop_all()
                PAUSED = True
        # Send rates to pumps
        new_era.set_rates(self.ser, rates)
    # K Start observer
    def start_detection(self):
        """Begin image-triggered pumping: ensure a watch directory is set,
        refresh rates, unpause, and start the watchdog observer (once)."""
        global PAUSED, INF
        if self.image_dir == '': # Checking if dir is set
            self.set_dir()
        self.update_rates()
        PAUSED = False
        # An Observer can only be started once; re-clicking Start after a
        # pause must not schedule/start it again.
        if not self.obs.is_alive():
            self.obs.schedule(self.new_file_watch, self.image_dir)
            self.obs.start()
        self.runbtn.setChecked(1)
        self.stopbtn.setChecked(0)
# K Stop the observer by | |
<reponame>edupo/python-ivi
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
from .. import ivi
from .. import scpi
class diconGP700(scpi.common.IdnCommand, scpi.common.ErrorQuery, scpi.common.Reset,
scpi.common.SelfTest, scpi.common.Memory,
ivi.Driver):
"DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
    def __init__(self, *args, **kwargs):
        """Set identity metadata, zero the module inventories, and register
        the instrument's dynamic properties/methods with the IVI machinery."""
        # setdefault so a subclass may pre-set its own _instrument_id.
        self.__dict__.setdefault('_instrument_id', 'GP700')
        super(diconGP700, self).__init__(*args, **kwargs)
        self._identity_description = "DiCon Fiberoptics GP700 Programmable Fiberoptic Instrument"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "DiCon Fiberoptics Inc"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 0
        self._identity_specification_minor_version = 0
        self._identity_supported_instrument_models = ['GP700']
        self._self_test_delay = 5
        self._memory_size = 8
        self._memory_offset = 1
        # Module inventories; populated by _init_channels() from the
        # instrument's configuration string.
        self._config = ""
        self._attenuator_count = 0
        self._attenuator_name = list()
        self._attenuator_level = list()
        self._attenuator_level_max = list()
        self._filter_count = 0
        self._filter_name = list()
        self._filter_wavelength = list()
        self._filter_wavelength_max = list()
        self._filter_wavelength_min = list()
        self._matrix_input_count = 0
        self._matrix_input_name = list()
        self._matrix_output_count = 0
        self._matrix_input_output = list()
        self._switch_count = 0
        self._switch_name = list()
        self._switch_output = list()
        self._switch_input = list()
        self._switch_output_count = list()
        self._switch_input_count = list()
        # Dynamic per-module properties: attenuators.
        self._add_property('attenuators[].level',
                        self._get_attenuator_level,
                        self._set_attenuator_level,
                        None,
                        ivi.Doc("""
                        Specifies the level of the attenuator module. The units are dB.
                        """))
        self._add_property('attenuators[].level_max',
                        self._get_attenuator_level_max,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the maximum attenuation level supported. The units are dB.
                        """))
        self._add_property('attenuators[].name',
                        self._get_attenuator_name,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the name of the attenuator module.
                        """))
        # Dynamic per-module properties: filters.
        self._add_property('filters[].wavelength',
                        self._get_filter_wavelength,
                        self._set_filter_wavelength,
                        None,
                        ivi.Doc("""
                        Specifies the center wavelength of the filter module. The units are nm.
                        """))
        self._add_property('filters[].wavelength_max',
                        self._get_filter_wavelength_max,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the maximum center wavelength of the filter. The units are nm.
                        """))
        self._add_property('filters[].wavelength_min',
                        self._get_filter_wavelength_min,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the minimum center wavelength of the filter. The units are nm.
                        """))
        self._add_property('filters[].name',
                        self._get_filter_name,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the name of the filter module.
                        """))
        # Dynamic per-module properties and methods: switches.
        self._add_property('switches[].output',
                        self._get_switch_output,
                        self._set_switch_output,
                        None,
                        ivi.Doc("""
                        Specify switch output connection.
                        """))
        self._add_property('switches[].output_count',
                        self._get_switch_output_count,
                        None,
                        None,
                        ivi.Doc("""
                        Query number of outputs supported by switch.
                        """))
        self._add_property('switches[].input',
                        self._get_switch_input,
                        self._set_switch_input,
                        None,
                        ivi.Doc("""
                        Specify switch input connection.
                        """))
        self._add_property('switches[].input_count',
                        self._get_switch_input_count,
                        None,
                        None,
                        ivi.Doc("""
                        Query number of inputs supported by switch.
                        """))
        self._add_method('switches[].get',
                        self._switch_get,
                        ivi.Doc("""
                        Get current switch input and output configuration.
                        """))
        self._add_method('switches[].set',
                        self._switch_set,
                        ivi.Doc("""
                        Set switch input and output configuration.
                        """))
        self._add_property('switches[].name',
                        self._get_switch_name,
                        None,
                        None,
                        ivi.Doc("""
                        Returns the name of the switch module.
                        """))
        # Instrument memory slots.
        self._add_method('memory.save',
                        self._memory_save,
                        ivi.Doc("""
                        Save device configuration to the specified memory slot.
                        """))
        self._add_method('memory.recall',
                        self._memory_recall,
                        ivi.Doc("""
                        Recall device configuration from the specified memory slot.
                        """))
        if self._initialized_from_constructor:
            self._init_channels()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
    "Opens an I/O session to the instrument."
    super(diconGP700, self)._initialize(resource, id_query, reset, **keywargs)
    # interface clear
    if not self._driver_operation_simulate:
        self._clear()
    # check ID
    if id_query and not self._driver_operation_simulate:
        id = self.identity.instrument_model
        id_check = self._instrument_id
        id_short = id[:len(id_check)]
        if id_short != id_check:
            # BUG FIX: the message was passed printf-style arguments, which
            # Exception does not interpolate; format the string explicitly.
            raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))
    # reset
    if reset:
        self.utility_reset()
    if not self._initialized_from_constructor:
        self._init_channels()
def _utility_disable(self):
pass
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
time.sleep(0.1)
self._clear()
self.driver_operation.invalidate_all_attributes()
def _utility_unlock_object(self):
pass
def _init_channels(self):
    """Query the instrument configuration string and build the module lists
    (attenuators, filters, matrix and switches) exposed by this driver."""
    try:
        super(diconGP700, self)._init_channels()
    except AttributeError:
        pass

    config = self._get_config()

    # reset all discovered-module state
    self._attenuator_count = 0
    self._attenuator_name = []
    self._attenuator_level = []
    self._attenuator_level_max = []
    self._filter_count = 0
    self._filter_name = []
    self._filter_wavelength = []
    self._filter_wavelength_max = []
    self._filter_wavelength_min = []
    self._matrix_input_count = 0
    self._matrix_input_name = []
    self._matrix_output_count = 0
    self._matrix_input_output = []
    self._switch_count = 0
    self._switch_name = []
    self._switch_output = []
    self._switch_input = []
    self._switch_output_count = []
    self._switch_input_count = []

    for entry in sorted(part.strip() for part in config.split(",")):
        if len(entry) == 0:
            continue
        fields = entry.split(" ")
        if fields[0] == 'MATRIX':
            # e.g. "MATRIX INPUT<n> OUTPUT<m>"
            self._matrix_input_count = int(fields[1][5:])
            self._matrix_output_count = int(fields[2][6:])
        elif entry[0] == 'A':
            # attenuator module: "<name> <max level>"
            if fields[0] not in self._attenuator_name:
                self._attenuator_count += 1
                self._attenuator_name.append(fields[0])
                self._attenuator_level.append(0.0)
                self._attenuator_level_max.append(0.0)
            idx = ivi.get_index(self._attenuator_name, fields[0])
            self._attenuator_level[idx] = 0.0
            self._attenuator_level_max[idx] = float(fields[1])
        elif entry[0] == 'F':
            # filter module: "<name> MIN<wl> MAX<wl>"
            if fields[0] not in self._filter_name:
                self._filter_count += 1
                self._filter_name.append(fields[0])
                self._filter_wavelength.append(0.0)
                self._filter_wavelength_min.append(0.0)
                self._filter_wavelength_max.append(0.0)
            idx = ivi.get_index(self._filter_name, fields[0])
            self._filter_wavelength[idx] = 0.0
            self._filter_wavelength_min[idx] = float(fields[1][3:])
            self._filter_wavelength_max[idx] = float(fields[2][3:])
        elif entry[0] == 'M':
            # M-type switch: both input and output are selectable
            if fields[0] not in self._switch_name:
                self._switch_count += 1
                self._switch_name.append(fields[0])
                self._switch_input.append(0)
                self._switch_output.append(0)
                self._switch_input_count.append(0)
                self._switch_output_count.append(0)
            idx = ivi.get_index(self._switch_name, fields[0])
            self._switch_input[idx] = 1
            self._switch_output[idx] = 0
            self._switch_input_count[idx] = int(fields[2][1:])
            self._switch_output_count[idx] = int(fields[1][1:])
        elif entry[0] == 'P':
            # P-type switch: single input, selectable output
            if fields[0] not in self._switch_name:
                self._switch_count += 1
                self._switch_name.append(fields[0])
                self._switch_input.append(0)
                self._switch_output.append(0)
                self._switch_input_count.append(0)
                self._switch_output_count.append(0)
            idx = ivi.get_index(self._switch_name, fields[0])
            self._switch_input[idx] = 1
            self._switch_output[idx] = 0
            self._switch_input_count[idx] = 1
            self._switch_output_count[idx] = int(fields[1][7:])
        elif entry[0] == 'S':
            # bank of <cnt> simple 1x2 switches named S01, S02, ...
            count = int(fields[0][1:])
            for k in range(count):
                switch_id = 'S%02d' % (k + 1)
                if switch_id not in self._switch_name:
                    self._switch_count += 1
                    self._switch_name.append(switch_id)
                    self._switch_input.append(0)
                    self._switch_output.append(0)
                    self._switch_input_count.append(0)
                    self._switch_output_count.append(0)
                idx = ivi.get_index(self._switch_name, switch_id)
                self._switch_input[idx] = 1
                self._switch_output[idx] = 1
                self._switch_input_count[idx] = 1
                self._switch_output_count[idx] = 2

    self.attenuators._set_list(self._attenuator_name)
    self.filters._set_list(self._filter_name)
    self.switches._set_list(self._switch_name)
def _get_config(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
self._config = self._ask("system:config?")
self._set_cache_valid()
return self._config
def _get_attenuator_level(self, index):
    """Return the attenuation level of the selected attenuator module,
    querying the instrument when the cache is stale."""
    index = ivi.get_index(self._attenuator_name, index)
    name = self._attenuator_name[index]
    if not self._driver_operation_simulate and not self._get_cache_valid():
        resp = self._ask("%s?" % (name))
        # BUG FIX: the response was assigned to self._attenuator_level itself,
        # replacing the whole list with a float and breaking the indexed read
        # below; store it in the proper slot instead.
        self._attenuator_level[index] = float(resp)
        self._set_cache_valid()
    return self._attenuator_level[index]
def _set_attenuator_level(self, index, value):
    """Set the attenuation level; raises OutOfRangeException when the value
    is negative or above the module maximum."""
    index = ivi.get_index(self._attenuator_name, index)
    name = self._attenuator_name[index]
    level = float(value)
    if not 0 <= level <= self._attenuator_level_max[index]:
        raise ivi.OutOfRangeException()
    if not self._driver_operation_simulate:
        self._write("%s %f" % (name, level))
    self._attenuator_level[index] = level
    self._set_cache_valid()
def _get_attenuator_level_max(self, index):
    """Return the maximum attenuation supported by the module."""
    return self._attenuator_level_max[ivi.get_index(self._attenuator_name, index)]
def _get_attenuator_name(self, index):
    """Return the name of the attenuator module."""
    return self._attenuator_name[ivi.get_index(self._attenuator_name, index)]
def _get_filter_wavelength(self, index):
    """Return the center wavelength (nm) of the selected filter module,
    querying the instrument when the cache is stale."""
    index = ivi.get_index(self._filter_name, index)
    name = self._filter_name[index]
    if not self._driver_operation_simulate and not self._get_cache_valid():
        resp = self._ask("%s?" % (name))
        # BUG FIX: the response was assigned to self._filter_wavelength
        # itself, replacing the whole list with a float and breaking the
        # indexed read below; store it in the proper slot instead.
        self._filter_wavelength[index] = float(resp)
        self._set_cache_valid()
    return self._filter_wavelength[index]
def _set_filter_wavelength(self, index, value):
    """Set the filter center wavelength (nm); raises OutOfRangeException
    outside the module's [min, max] range."""
    index = ivi.get_index(self._filter_name, index)
    name = self._filter_name[index]
    wavelength = float(value)
    in_range = (self._filter_wavelength_min[index]
                <= wavelength
                <= self._filter_wavelength_max[index])
    if not in_range:
        raise ivi.OutOfRangeException()
    if not self._driver_operation_simulate:
        self._write("%s %f" % (name, wavelength))
    self._filter_wavelength[index] = wavelength
    self._set_cache_valid()
def _get_filter_wavelength_max(self, index):
    """Return the maximum center wavelength (nm) of the filter module."""
    index = ivi.get_index(self._filter_name, index)
    # BUG FIX: this returned the current wavelength instead of the maximum.
    return self._filter_wavelength_max[index]
def _get_filter_wavelength_min(self, index):
    """Return the minimum center wavelength (nm) of the filter module."""
    index = ivi.get_index(self._filter_name, index)
    # BUG FIX: this returned the current wavelength instead of the minimum.
    return self._filter_wavelength_min[index]
def _get_filter_name(self, index):
    """Return the name of the filter module."""
    return self._filter_name[ivi.get_index(self._filter_name, index)]
def _get_switch_output(self, index):
return self._switch_get(index)[0]
def _set_switch_output(self, index, value):
self._switch_set(index, value)
def _get_switch_output_count(self, index):
    """Return the number of outputs supported by the switch."""
    return self._switch_output_count[ivi.get_index(self._switch_name, index)]
def _get_switch_input(self, index):
return self._switch_get(index)[1]
def _set_switch_input(self, index, value):
    """Select the switch input while keeping the current output routing."""
    index = ivi.get_index(self._switch_name, index)
    self._switch_set(index, self._switch_output[index], value)
def _get_switch_input_count(self, index):
    """Return the number of inputs supported by the switch."""
    return self._switch_input_count[ivi.get_index(self._switch_name, index)]
def _switch_get(self, index):
    """Return (output, input) for the switch, refreshing the cache from the
    instrument when needed.  Modules whose name starts with 'M' report both
    values; 'P' and 'S' modules report only an output (input is fixed at 1)."""
    index = ivi.get_index(self._switch_name, index)
    name = self._switch_name[index]
    if name[0] == 'M':
        if not self._driver_operation_simulate:
            stale = (not self._get_cache_valid('switch_output', index)
                     or not self._get_cache_valid('switch_input', index))
            if stale:
                out_str, in_str = self._ask("%s?" % name).split(',')
                self._switch_output[index] = int(out_str.strip())
                self._switch_input[index] = int(in_str.strip())
                self._set_cache_valid(True, 'switch_output', index)
                self._set_cache_valid(True, 'switch_input', index)
        return (self._switch_output[index], self._switch_input[index])
    elif name[0] in ('P', 'S'):
        if not self._driver_operation_simulate:
            if not self._get_cache_valid('switch_output', index):
                self._switch_output[index] = int(self._ask("%s?" % name).strip())
                self._switch_input[index] = 1
                self._set_cache_valid(True, 'switch_output', index)
                self._set_cache_valid(True, 'switch_input', index)
        return (self._switch_output[index], self._switch_input[index])
def _switch_set(self, index, output, input=None):
index = ivi.get_index(self._switch_name, index)
name = self._switch_name[index]
output = int(output)
if input is not None:
input = int(input)
if name[0] | |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
#from gym.wrappers.monitoring import VideoRecorder
import gym
from dotmap import DotMap
import time
import sys
import re
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np
import dmbrl.misc.MBExp
from dmbrl.misc.run import run
from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
import pybullet_envs.bullet.racecarGymEnv as e
# from stable_baselines.sac.policies import MlpPolicy
# from stable_baselines.common.vec_env import DummyVecEnv
# from stable_baselines import SAC
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
# Map of env_type -> set of env ids, built from the gym registry.
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
    # TODO: solve this with regexes
    env_type = env._entry_point.split(':')[0].split('.')[-1]
    _game_envs[env_type].add(env.id)

# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
    'BubbleBobble-Nes',
    'SuperMarioBros-Nes',
    'TwinBee3PokoPokoDaimaou-Nes',
    'SpaceHarrier-Nes',
    'SonicTheHedgehog-Genesis',
    'Vectorman-Genesis',
    'FinalFight-Snes',
    'SpaceInvaders-Snes',
}
"""
A simple version of Proximal Policy Optimization (PPO) using single thread.
Based on:
1. Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [https://arxiv.org/abs/1707.02286]
2. Proximal Policy Optimization Algorithms (OpenAI): [https://arxiv.org/abs/1707.06347]
View more on my tutorial website: https://morvanzhou.github.io/tutorials
Dependencies:
tensorflow r1.2
gym 0.9.2
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
# ----- PPO hyper-parameters (used by the PPO class below) -----
EP_MAX = 100  # total training episodes
EP_LEN = 200  # steps per episode
GAMMA = 0.9  # reward discount factor
A_LR = 0.0001  # actor (policy) learning rate
C_LR = 0.0002  # critic (value) learning rate
BATCH = 8  # timesteps between PPO updates
A_UPDATE_STEPS = 10  # gradient steps per actor update
C_UPDATE_STEPS = 10  # gradient steps per critic update
S_DIM, A_DIM = 2,1  # state and action dimensionality
METHOD = [
dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty
dict(name='clip', epsilon=0.2), # Clipped surrogate objective, find this is better
][1] # choose the method for optimization
class PPO(object):
    """Single-threaded Proximal Policy Optimization for a continuous action.

    Builds a critic (value) network and two Gaussian actor networks (current
    and old policy) in one TF session.  The actor update uses either the
    KL-penalty or the clipped-surrogate objective, selected by the
    module-level METHOD setting.
    """

    def __init__(self):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')

        # critic
        with tf.variable_scope('critic'):
            hidden = tf.layers.dense(self.tfs, 100, tf.nn.relu)
            self.v = tf.layers.dense(hidden, 1)
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))
            self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)

        # actor
        pi, pi_params = self._build_anet('pi', trainable=True)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
        with tf.variable_scope('sample_action'):
            self.sample_op = tf.squeeze(pi.sample(1), axis=0)  # choosing action
        with tf.variable_scope('update_oldpi'):
            self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]

        self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate'):
                # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
                ratio = pi.prob(self.tfa) / oldpi.prob(self.tfa)
                surr = ratio * self.tfadv
            if METHOD['name'] == 'kl_pen':
                self.tflam = tf.placeholder(tf.float32, None, 'lambda')
                kl = tf.distributions.kl_divergence(oldpi, pi)
                self.kl_mean = tf.reduce_mean(kl)
                self.aloss = -(tf.reduce_mean(surr - self.tflam * kl))
            else:
                # clipping method, find this is better
                self.aloss = -tf.reduce_mean(tf.minimum(
                    surr,
                    tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))

        with tf.variable_scope('atrain'):
            self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)

        tf.summary.FileWriter("log/", self.sess.graph)
        self.sess.run(tf.global_variables_initializer())

    def update(self, s, a, r):
        """Run one PPO update from a batch of states, actions and returns."""
        self.sess.run(self.update_oldpi_op)
        adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
        # adv = (adv - adv.mean())/(adv.std()+1e-6)  # sometimes helpful

        # update actor
        if METHOD['name'] == 'kl_pen':
            for _ in range(A_UPDATE_STEPS):
                _, kl = self.sess.run(
                    [self.atrain_op, self.kl_mean],
                    {self.tfs: s, self.tfa: a, self.tfadv: adv, self.tflam: METHOD['lam']})
                if kl > 4*METHOD['kl_target']:  # this in in google's paper
                    break
            if kl < METHOD['kl_target'] / 1.5:  # adaptive lambda, this is in OpenAI's paper
                METHOD['lam'] /= 2
            elif kl > METHOD['kl_target'] * 1.5:
                METHOD['lam'] *= 2
            # sometimes explode, this clipping is my solution
            METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10)
        else:
            # clipping method, find this is better (OpenAI's paper)
            for _ in range(A_UPDATE_STEPS):
                self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv})

        # update critic
        for _ in range(C_UPDATE_STEPS):
            self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r})

    def _build_anet(self, name, trainable):
        """Build a Gaussian policy network under variable scope `name`."""
        with tf.variable_scope(name):
            hidden = tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable)
            mu = 2 * tf.layers.dense(hidden, A_DIM, tf.nn.tanh, trainable=trainable)
            sigma = tf.layers.dense(hidden, A_DIM, tf.nn.softplus, trainable=trainable)
            norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        return norm_dist, params

    def choose_action(self, s):
        """Sample an action for state `s`, clipped to [-2, 2]."""
        s = s[np.newaxis, :]
        action = self.sess.run(self.sample_op, {self.tfs: s})[0]
        return np.clip(action, -2, 2)

    def get_v(self, s):
        """Return the critic's value estimate for state `s`."""
        if s.ndim < 2:
            s = s[np.newaxis, :]
        return self.sess.run(self.v, {self.tfs: s})[0, 0]
# env = gym.make('Pendulum-v0').unwrapped
# ppo = PPO()
# all_ep_r = []
# for ep in range(EP_MAX):
# s = env.reset()
# buffer_s, buffer_a, buffer_r = [], [], []
# ep_r = 0
# for t in range(EP_LEN): # in one episode
# env.render()
# a = ppo.choose_action(s)
# s_, r, done, _ = env.step(a)
# buffer_s.append(s)
# buffer_a.append(a)
# buffer_r.append((r+8)/8) # normalize reward, find to be useful
# s = s_
# ep_r += r
# # update ppo
# if (t+1) % BATCH == 0 or t == EP_LEN-1:
# v_s_ = ppo.get_v(s_)
# discounted_r = []
# for r in buffer_r[::-1]:
# v_s_ = r + GAMMA * v_s_
# discounted_r.append(v_s_)
# discounted_r.reverse()
# bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
# buffer_s, buffer_a, buffer_r = [], [], []
# ppo.update(bs, ba, br)
# if ep == 0: all_ep_r.append(ep_r)
# else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
# print(
# 'Ep: %i' % ep,
# "|Ep_r: %i" % ep_r,
# ("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
# )
# plt.plot(np.arange(len(all_ep_r)), all_ep_r)
# plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.show()
def train(args, extra_args):
    """Build the environment and train a model with the requested algorithm.

    Returns (model, env) so callers can run or save the trained model.
    """
    env_type, env_id = get_env_type(args)
    print('env_type: {}'.format(env_type))

    total_timesteps = int(args.num_timesteps)
    seed = args.seed

    learn = get_learn_function(args.alg)
    alg_kwargs = get_learn_function_defaults(args.alg, env_type)
    alg_kwargs.update(extra_args)

    env = build_env(args)
    if args.save_video_interval != 0:
        env = VecVideoRecorder(
            env, osp.join(logger.get_dir(), "videos"),
            record_video_trigger=lambda x: x % args.save_video_interval == 0,
            video_length=args.save_video_length)

    # network choice: explicit CLI flag wins, then algorithm default,
    # then the per-env-type default
    if args.network:
        alg_kwargs['network'] = args.network
    elif alg_kwargs.get('network') is None:
        alg_kwargs['network'] = get_default_network(env_type)

    print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))

    model = learn(
        env=env,
        seed=seed,
        total_timesteps=total_timesteps,
        **alg_kwargs
    )
    return model, env
def build_env(args):
    """Construct the (possibly vectorized) training environment for `args`."""
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin':
        ncpu //= 2
    nenv = args.num_env or ncpu
    alg = args.alg
    seed = args.seed

    env_type, env_id = get_env_type(args)

    if env_type in {'atari', 'retro'}:
        if alg == 'deepq':
            env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})
        elif alg == 'trpo_mpi':
            env = make_env(env_id, env_type, seed=seed)
        else:
            frame_stack_size = 4
            env = make_vec_env(env_id, env_type, nenv, seed,
                               gamestate=args.gamestate,
                               reward_scale=args.reward_scale)
            env = VecFrameStack(env, frame_stack_size)
    else:
        config = tf.ConfigProto(allow_soft_placement=True,
                                intra_op_parallelism_threads=1,
                                inter_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True
        get_session(config=config)

        # HER needs the dict observation space intact
        flatten_dict_observations = alg not in {'her'}
        env = make_vec_env(env_id, env_type, args.num_env or 1, seed,
                           reward_scale=args.reward_scale,
                           flatten_dict_observations=flatten_dict_observations)
        if env_type == 'mujoco':
            env = VecNormalize(env, use_tf=True)

    return env
def get_env_type(args):
    """Resolve (env_type, env_id) for the requested environment.

    Uses args.env_type when given; otherwise looks the env id up in the
    gym registry.  Raises AssertionError if the env id is unknown.
    """
    env_id = args.env

    if args.env_type is not None:
        return args.env_type, env_id

    # Re-parse the gym registry, since we could have new envs since last time.
    for env in gym.envs.registry.all():
        env_type = env._entry_point.split(':')[0].split('.')[-1]
        _game_envs[env_type].add(env.id)  # This is a set so add is idempotent

    if env_id in _game_envs.keys():
        env_type = env_id
        env_id = [g for g in _game_envs[env_type]][0]
    else:
        env_type = None
        for g, e in _game_envs.items():
            if env_id in e:
                env_type = g
                break
        if ':' in env_id:
            env_type = re.sub(r':.*', '', env_id)
        # BUG FIX: the message had one placeholder but two arguments, so the
        # list of known env types was silently dropped from the error text.
        assert env_type is not None, 'env_id {} is not recognized in env types {}'.format(env_id, _game_envs.keys())

    return env_type, env_id
def get_default_network(env_type):
    """Default policy network architecture for the given environment class."""
    return 'cnn' if env_type in {'atari', 'retro'} else 'mlp'
def get_alg_module(alg, submodule=None):
    """Import and return <alg>.<submodule>, trying baselines first and
    falling back to the internal rl_algs package."""
    submodule = submodule or alg
    try:
        # first try to import the alg module from baselines
        return import_module('.'.join(['baselines', alg, submodule]))
    except ImportError:
        # then from rl_algs
        return import_module('.'.join(['rl_' + 'algs', alg, submodule]))
def get_learn_function(alg):
    """Return the learn() entry point of the algorithm's module."""
    return getattr(get_alg_module(alg), 'learn')
def get_learn_function_defaults(alg, env_type):
    """Return the algorithm's default kwargs for this env type, or {} when
    the algorithm has no defaults module or no entry for the env type."""
    try:
        defaults_module = get_alg_module(alg, 'defaults')
        return getattr(defaults_module, env_type)()
    except (ImportError, AttributeError):
        return {}
def parse_cmdline_kwargs(args):
    '''
    convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
    '''
    def parse(v):
        # NOTE(security): eval on CLI input is by design here, but never feed
        # this function strings from an untrusted source.
        assert isinstance(v, str)
        try:
            return eval(v)
        except (NameError, SyntaxError):
            return v

    return {key: parse(val) for key, val in parse_unknown_args(args).items()}
def configure_logger(log_path, **kwargs):
    """Configure the baselines logger, preferring an explicit log path."""
    if log_path is None:
        logger.configure(**kwargs)
    else:
        logger.configure(log_path)
def get_ppo(argv=None):
    """Parse command-line style arguments and train a PPO model.

    Parameters
    ----------
    argv : list of str or None
        Arguments to parse, e.g. ['--alg=ppo2', '--env=RacecarBulletEnv-v0'].
        None falls back to sys.argv, matching argparse's default behaviour.

    Returns
    -------
    The trained model returned by train().
    """
    # configure logger, disable logging in child MPI processes (with rank > 0)
    print('enter main function')
    arg_parser = common_arg_parser()
    # BUG FIX: 'args1' was referenced before assignment (the example argument
    # lists were commented out), so this always raised NameError; parse the
    # caller-supplied argv instead.
    args1, unknown_args = arg_parser.parse_known_args(argv)
    print('unknown_args')
    print(unknown_args)
    extra_args = parse_cmdline_kwargs(unknown_args)
    print('extra')
    print(extra_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        #configure_logger(args.log_path)
    else:
        rank = MPI.COMM_WORLD.Get_rank()
        configure_logger(args1.log_path, format_strs=[])

    model, env = train(args1, extra_args)
    return model
#env = e.RacecarGymEnv(isDiscrete=False ,renders=True)
# if args.save_path is not None and rank == 0:
# save_path = osp.expanduser(args.save_path)
# model.save(save_path)
# if args.play:
# logger.log("Running trained model")
# obs = env.reset()
# state = model.initial_state if hasattr(model, 'initial_state') else None
# dones = np.zeros((1,))
# episode_rew = 0
# while True:
| |
be dense (contain all bits) or sparse (just the on
bits).
use_pharm_features : bool (default=False)
Switch to use pharmacophoric features as atom representation instead of
explicit atomic numbers etc.
Returns
-------
fingerprint : numpy array
Calsulated FP of fixed size (dense) or on bits indices (sparse). Dtype
is either integer or boolean.
"""
# Hash atom environments
mol_hashed = []
atom_repr_dict = {}
for idx, atom in enumerate(mol.atoms):
if atom.atomicnum == 1:
continue
atom_repr_dict[idx] = _ECFP_atom_repr(
mol, idx, use_pharm_features=use_pharm_features)
for idx in atom_repr_dict.keys():
mol_hashed.append(_ECFP_atom_hash(mol, idx, depth=depth,
use_pharm_features=use_pharm_features,
atom_repr_dict=atom_repr_dict))
mol_hashed = np.array(sorted(chain(*mol_hashed)))
# folding
mol_hashed = fold(mol_hashed, size)
if not count_bits:
mol_hashed = np.unique(mol_hashed)
# dense or sparse FP
if not sparse:
mol_hashed = sparse_to_dense(mol_hashed, size=size)
return mol_hashed
def SPLIF(ligand, protein, depth=1, size=4096, distance_cutoff=4.5):
    """Calculates structural protein-ligand interaction fingerprint (SPLIF),
    based on http://pubs.acs.org/doi/abs/10.1021/ci500319f.

    Parameters
    ----------
    ligand, protein : oddt.toolkit.Molecule object
        Molecules, which are analysed in order to find interactions.
    depth : int (deafult = 1)
        The depth of the fingerprint, i.e. the number of bonds in Morgan
        algorithm. Note: For ECFP2: depth = 1, ECFP4: depth = 2, etc.
    size: int (default = 4096)
        SPLIF is folded to given size.
    distance_cutoff: float (default=4.5)
        Cutoff distance for close contacts.

    Returns
    -------
    SPLIF : numpy array
        Calculated SPLIF.shape = (no. of atoms, ). Every row consists of three
        elements:
            row[0] = index of hashed atoms
            row[1].shape = (7, 3) -> ligand's atom coords and 6 his neigbor's
            row[2].shape = (7, 3) -> protein's atom coords and 6 his neigbor's
    """
    # strip hydrogens from both molecules
    protein_heavy = protein.atom_dict[protein.atom_dict['atomicnum'] != 1]
    ligand_heavy = ligand.atom_dict[ligand.atom_dict['atomicnum'] != 1]

    protein_atoms, ligand_atoms = close_contacts(
        protein_heavy, ligand_heavy, cutoff=distance_cutoff)

    splif = np.zeros(
        (len(ligand_atoms)),
        dtype=[('hash', np.int64),
               ('ligand_coords', np.float32, (7, 3)),
               ('protein_coords', np.float32, (7, 3))])

    lig_repr = {aidx: _ECFP_atom_repr(ligand, int(aidx))
                for aidx in ligand_heavy['id']}
    prot_repr = {aidx: _ECFP_atom_repr(protein, int(aidx))
                 for aidx in protein_heavy['id']}

    for i, (lig_atom, prot_atom) in enumerate(zip(ligand_atoms, protein_atoms)):
        if lig_atom['atomicnum'] == 1 or prot_atom['atomicnum'] == 1:
            continue
        lig_hash = _ECFP_atom_hash(ligand, int(lig_atom['id']), depth=depth,
                                   atom_repr_dict=lig_repr)[-1]
        prot_hash = _ECFP_atom_hash(protein, int(prot_atom['id']), depth=depth,
                                    atom_repr_dict=prot_repr)[-1]
        # sorting the pair makes the hash independent of the argument order,
        # i.e. splif(protein, ligand) gives the same bits
        splif[i] = (hash32(tuple(sorted((lig_hash, prot_hash)))),
                    np.vstack((lig_atom['coords'].reshape((1, 3)),
                               lig_atom['neighbors'])),
                    np.vstack((prot_atom['coords'].reshape((1, 3)),
                               prot_atom['neighbors'])))

    # folding
    splif['hash'] = fold(splif['hash'], size)
    return np.sort(splif)
def similarity_SPLIF(reference, query, rmsd_cutoff=1.):
"""Calculates similarity between structural interaction fingerprints,
based on doi:http://pubs.acs.org/doi/abs/10.1021/ci500319f.
Parameters
----------
reference, query: numpy.array
SPLIFs, which are compared in order to determine similarity.
rmsd_cutoff : int (default = 1)
Specific treshold for which, bits are considered as fully matching.
Returns
-------
SimilarityScore : float
Similarity between given fingerprints.
"""
# intersection of reference and query hashed atoms
index = np.intersect1d(reference['hash'], query['hash'])
ref_intersection = reference[np.where(np.in1d(reference['hash'], index))]
ref_group_intersection = np.split(ref_intersection, np.searchsorted(
ref_intersection['hash'], index[1:])) # reference
query_intersection = query[np.where(np.in1d(query['hash'], index))]
query_group_intersection = np.split(query_intersection, np.searchsorted(
query_intersection['hash'], index[1:])) # query
numla = 0 # number of unique matching ligand atoms
nula = 0 # number of unique ligand atoms
numpa = 0 # number of unique matching protein atoms
nupa = 0 # number of unique protein atoms
def combinatorial_rmsd(reference, query):
"""Calculates root mean square deviation between groups of points. It
takes two matrices of shapes e.g (2, 5, 3) and (4, 5, 3) -> (2, 4)."""
return np.sqrt(np.nansum(np.mean(
(reference[:, np.newaxis, ...] - query)**2, axis=-1), axis=-1))
for pair in range(len(ref_group_intersection)):
# reference protein-ligand pair
ref_pair = ref_group_intersection[pair]
# query protein-ligand pair
query_pair = query_group_intersection[pair]
ref_ligand = ref_pair['ligand_coords']
ref_protein = ref_pair['protein_coords']
query_ligand = query_pair['ligand_coords']
query_protein = query_pair['protein_coords']
rmsd_ligand = combinatorial_rmsd(ref_ligand, query_ligand)
rmsd_protein = combinatorial_rmsd(ref_protein, query_protein)
passing_ligand = rmsd_ligand < rmsd_cutoff
passing_protein = rmsd_protein < rmsd_cutoff
num_matching_ligand = min(passing_ligand.any(axis=0).sum(), passing_ligand.any(axis=1).sum())
num_matching_protein = min(passing_protein.any(axis=0).sum(), passing_protein.any(axis=1).sum())
num_all_ligand = len(ref_ligand) + len(query_ligand) - num_matching_ligand
num_all_protein = len(ref_protein) + len(query_protein) - num_matching_protein
numla += num_matching_ligand
numpa += num_matching_protein
nula += num_all_ligand
nupa += num_all_protein
if nula == 0 or nupa == 0:
return 0.
else:
return np.sqrt((numla / nula) * (numpa / nupa))
# Per-bit provenance record for PLEC: which ligand/protein root atoms and
# which Morgan depths produced a given fingerprint bit.
PLEC_bit_info_record = namedtuple(
    'PLEC_bit_info_record',
    ['ligand_root_atom_idx', 'ligand_depth',
     'protein_root_atom_idx', 'protein_depth'])
def PLEC(ligand, protein, depth_ligand=2, depth_protein=4, distance_cutoff=4.5,
         size=16384, count_bits=True, sparse=True, ignore_hoh=True, bits_info=None):
    """Protein ligand extended connectivity fingerprint. For every pair of
    atoms in contact, compute ECFP and then hash every single, corresponding
    depth.

    Parameters
    ----------
    ligand, protein : oddt.toolkit.Molecule object
        Molecules, which are analysed in order to find interactions.
    depth_ligand, depth_protein : int (deafult = (2, 4))
        The depth of the fingerprint, i.e. the number of bonds in Morgan
        algorithm. Note: For ECFP2: depth = 1, ECFP4: depth = 2, etc.
    size: int (default = 16384)
        SPLIF is folded to given size.
    distance_cutoff: float (default=4.5)
        Cutoff distance for close contacts.
    sparse : bool (default = True)
        Should fingerprints be dense (contain all bits) or sparse (just the on
        bits).
    count_bits : bool (default = True)
        Should the bits be counted or unique. In dense representation it
        translates to integer array (count_bits=True) or boolean array if False.
    ignore_hoh : bool (default = True)
        Should the water molecules be ignored. This is based on the name of the
        residue ('HOH').
    bits_info : dict or None (default = None)
        If dictionary is provided it is filled with information about bit contents.
        Root atom index and depth is provided for both ligand and protein.
        Dictionary is modified in-place.

    Returns
    -------
    PLEC : numpy array
        fp (size = atoms in contacts * max(depth_protein, depth_ligand))
    protein_hash : dict
        Per-depth protein ECFP hashes collected during the computation,
        keyed by depth as a string ('0'..'5').
    """
    result = []
    bit_info_content = []
    protein_hash = {str(d): [] for d in range(6)}

    # removing h (and optionally water) from the protein
    protein_no_h = (protein.atom_dict['atomicnum'] != 1)
    protein_mask = protein_no_h
    if ignore_hoh:
        # a copy is needed, so not modifing inplace
        protein_mask = protein_mask & (protein.atom_dict['resname'] != 'HOH')
    protein_dict = protein.atom_dict[protein_mask]
    ligand_dict = ligand.atom_dict[ligand.atom_dict['atomicnum'] != 1]

    # atoms in contact
    protein_atoms, ligand_atoms = close_contacts(
        protein_dict, ligand_dict, cutoff=distance_cutoff)

    lig_atom_repr = {aidx: _ECFP_atom_repr(ligand, aidx)
                     for aidx in ligand_dict['id'].tolist()}
    # HOH residues might be connected to metal atoms
    prot_atom_repr = {aidx: _ECFP_atom_repr(protein, aidx)
                      for aidx in protein.atom_dict[protein_no_h]['id'].tolist()}

    for ligand_atom, protein_atom in zip(ligand_atoms['id'].tolist(),
                                         protein_atoms['id'].tolist()):
        ligand_ecfp = _ECFP_atom_hash(ligand, ligand_atom, depth=depth_ligand,
                                      atom_repr_dict=lig_atom_repr)
        protein_ecfp = _ECFP_atom_hash(protein, protein_atom, depth=depth_protein,
                                       atom_repr_dict=prot_atom_repr)
        for d, ecfp_bit in enumerate(protein_ecfp):
            protein_hash[str(d)].append(ecfp_bit)
        assert len(ligand_ecfp) == depth_ligand + 1
        assert len(protein_ecfp) == depth_protein + 1

        # fillvalue is parameter from zip_longest; when ligand_ecfp and
        # protein_ecfp differ in length, the shorter one is padded with its
        # last (deepest) ECFP entry
        if depth_ligand < depth_protein:
            fillvalue = depth_ligand, ligand_ecfp[-1]
        else:
            fillvalue = depth_protein, protein_ecfp[-1]

        for (ligand_depth, ligand_bit), (protein_depth, protein_bit) in zip_longest(
                enumerate(ligand_ecfp), enumerate(protein_ecfp), fillvalue=fillvalue):
            result.append(hash32((ligand_bit, protein_bit)))
            if bits_info is not None:
                bit_info_content.append(PLEC_bit_info_record(
                    ligand_root_atom_idx=ligand_atom,
                    ligand_depth=ligand_depth,
                    protein_root_atom_idx=protein_atom,
                    protein_depth=protein_depth))

    # folding and sorting
    plec = fold(np.array(result), size=size)
    if bits_info is not None:
        order = np.argsort(plec)
        plec = plec[order].astype(np.min_scalar_type(size))
        # sort bit info according to folded PLEC
        for bit_number, info_idx in zip(plec, order):
            bits_info.setdefault(bit_number, set()).add(bit_info_content[info_idx])
    else:
        plec = np.sort(plec).astype(np.min_scalar_type(size))

    # count_bits
    if not count_bits:
        plec = np.unique(plec)
    # sparse or dense FP
    if not sparse:
        plec = sparse_to_dense(plec, size=size)
    return plec, protein_hash
def dice(a, b, sparse=False):
    """Calculates the Dice coefficient, the ratio of the bits in common to
    the arithmetic mean of the number of 'on' bits in the two fingerprints.
    Supports integer and boolean fingerprints.

    Parameters
    ----------
    a, b : numpy array
        Interaction fingerprints, which are compared
        in order to determine similarity.
    sparse : bool (default=False)
        Type of FPs to use. Defaults to dense form.

    Returns
    -------
    score : float
        Similarity between a, b.
    """
    # BUG FIX: the dense branch was mis-nested under the sparse branch's
    # 'denominator > 0' check, so dense input took the wrong path and the
    # zero-denominator sparse case returned None instead of 0.; restore the
    # sparse/dense split with a shared final ratio.
    if sparse:
        a_unique, a_counts = np.unique(a, return_counts=True)
        b_unique, b_counts = np.unique(b, return_counts=True)
        a_b_intersection = np.intersect1d(a_unique, b_unique, assume_unique=True)
        a_b = np.minimum(a_counts[np.in1d(a_unique, a_b_intersection)],
                         b_counts[np.in1d(b_unique, a_b_intersection)]).sum()
        denominator = len(a) + len(b)
    else:
        a_b = np.vstack((a, b)).min(axis=0).sum()
        denominator = a.sum() + b.sum()
    if denominator > 0:
        return 2 * a_b.astype(float) / denominator
    return 0.
def tanimoto(a, b, sparse=False):
"""Tanimoto coefficient, supports boolean fingerprints.
Integer fingerprints are casted to boolean.
Parameters
----------
a, b : numpy array
Interaction fingerprints, which are compared
in order to determine similarity.
sparse : bool | |
import open3d as o3d
import os, sys
import argparse
import random
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import torch
import pickle
import time
from pathlib import Path
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset, DataLoader
from ossid.models.dtoid import DtoidNet
from ossid.models.maskrcnn import MaskRCNN
from ossid.datasets import getDataloaders
from ossid.datasets.utils import collate_fn
from ossid.utils import expandBox, dict_to, to_np, move_to
from ossid.utils.bop_utils import saveResultsBop
from ossid.utils.zephyr_utils import networkInference
from ossid.config import OSSID_CKPT_ROOT, OSSID_DATA_ROOT, BOP_RESULTS_FOLDER, OSSID_RESULT_ROOT, BOP_DATASETS_ROOT, OSSID_DET_ROOT
from ossid.utils.detection import saveLmoYcbvGT, evalFinetuneResults
from zephyr.datasets.score_dataset import ScoreDataset
from zephyr.models.pointnet2 import PointNet2SSG
from zephyr.options import getOptions, checkArgs
from zephyr.utils import depth2cloud, meta2K, K2meta, projectPointsUv
from zephyr.utils.metrics import add, adi
from zephyr.utils.bop_dataset import BopDataset, BopDatasetArgs
from zephyr.utils.halcon_wrapper import PPFModel
from zephyr.utils.renderer import Renderer, blend
from zephyr.utils.icp import icpRefinement
from zephyr.constants import OBJECT_DIAMETERES
from zephyr.data_util import hypoShiftYcbv2BopBatch, modelPointsShiftYcbv2Bop, modelShiftBopYcbv
from zephyr.full_pipeline.model_featurization import FeatureModel
from zephyr.full_pipeline.scene_featurization import featurizeScene
from bop_toolkit_lib.visibility import estimate_visib_mask_gt
from bop_toolkit_lib.misc import ensure_dir, depth_im_to_dist_im_fast
import faulthandler
faulthandler.enable()
def makeFolder(folder):
    """Create *folder* (including missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    check-then-create pair, which could raise FileExistsError if another
    process created the directory between the check and the call.
    """
    os.makedirs(folder, exist_ok=True)
def getFeaturizedModels(dataset):
    """Build a FeatureModel for every object in *dataset*.

    Configures the zephyr full-pipeline options from the dataset's paths,
    mirrors the intrinsic matrix K into the scalar fx/fy/cx/cy entries the
    pipeline expects, then constructs and returns {obj_id: FeatureModel}.
    """
    # Local import: this getOptions intentionally shadows the module-level
    # one from zephyr.options (same as the original code).
    from zephyr.full_pipeline.options import getOptions
    opt_parser = getOptions()
    opts = opt_parser.parse_args([])
    opts.bop_root = dataset.bop_root
    opts.dataset_name = dataset.dataset_name
    opts.grid_dir_name = "grid"
    opts.sampled_model_dir_name = "model_pc"
    opts.grid_indices_path = os.path.join(
        opts.bop_root, opts.dataset_name, opts.grid_dir_name, "verts_grid_0.npy")

    # Expand the 3x3 intrinsic matrix into the scalar entries used downstream.
    cam = dataset.dataset_camera
    for key, (row, col) in (("fx", (0, 0)), ("fy", (1, 1)),
                            ("cx", (0, 2)), ("cy", (1, 2))):
        cam[key] = cam['K'][row, col]

    featured = {}
    for oid in dataset.obj_ids:
        model = FeatureModel(dataset.dataset_root, oid in dataset.sym_obj_ids,
                             opts, create_index=True)
        model.construct(oid, dataset.getObjPath(oid), cam)
        featured[oid] = model
    return featured
def main(main_args):
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
DATASET_NAME = main_args.dataset_name
DTOID_CONFIDENT_THRESHOLD = 0.5
ZEPHYR_CONFIDENT_THRESHOLD = 20
SAVE_ROOT = OSSID_RESULT_ROOT
assert not (main_args.ignore_dtoid_mask and main_args.always_dtoid_mask)
makeFolder(SAVE_ROOT)
makeFolder(BOP_RESULTS_FOLDER)
next_finetune_number = main_args.finetune_interval
'''Initialize the trained DTOID model'''
# Use the DTOID network
if main_args.dtoid_weights_path is not None:
ckpt_v = int(main_args.dtoid_weights_path.split("/")[-2].split("_")[1][1:])
ckpt_path = Path(main_args.dtoid_weights_path)
conf_path = ckpt_path.parent.parent / ("config_v%d.yaml" % ckpt_v)
elif DATASET_NAME == 'lmo':
conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_lmo.yaml")
if main_args.use_offline_model:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_lmo.ckpt")
else:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")
elif DATASET_NAME == 'ycbv':
conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_ycbv.yaml")
if main_args.use_offline_model:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_ycbv.ckpt")
else:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")
ossid_args = OmegaConf.load(conf_path)
# Override arguments by use-provided directories
ossid_args.dataset.bop_root = BOP_DATASETS_ROOT
ossid_args.model.pretrained_dtoid_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained_original.pth.tar")
if DATASET_NAME == 'ycbv':
ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_YCBV_BOP")
ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "test_ycbv_boptest_zephyr_result_unseen.pkl")
elif DATASET_NAME == 'lmo':
ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_LMO_DTOID")
ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "lmo_boptest_zephyr_result.pkl")
# Use the DTOID provided by original authors (https://github.com/jpmerc/DTOID)
# This model was trained also on YCB-V objects, and thus can only be used to evaluate on LM-O.
ossid_args.model.use_pretrained_dtoid = main_args.use_pretrained_dtoid
ossid_args.dataset.test_dataset_name = main_args.dataset_name
ossid_args.dataset.train_dataset_name = main_args.dataset_name
# Keep all the zephyr results for the training set
ossid_args.dataset.zephyr_filter_key = None
ossid_args.dataset.zephyr_results_percent = 1
# use more templates for training
ossid_args.dataset.train_local_template_sample_from = 10
if main_args.n_local_test is not None:
ossid_args.dataset.n_local_test = main_args.n_local_test
elif main_args.use_pretrained_dtoid: # If their weights are used
ossid_args.dataset.n_local_test = 160
else: # If our weights are used
ossid_args.dataset.n_local_test = 10
print("Number of local templates =", ossid_args.dataset.n_local_test)
train_loader, valid_loader, test_loader = getDataloaders(ossid_args)
# Sort the test loader
test_loader.dataset.sortTargets(reverse=main_args.backward)
ModelClass = DtoidNet
model = DtoidNet(ossid_args)
if main_args.use_pretrained_dtoid:
# DTOID weightes provided by the authors will be loaded
print("Loading DTOID weights provided by the original authors")
pass
elif ckpt_path is not None:
print("Loading DTOID Model weights from", ckpt_path)
ckpt = torch.load(ckpt_path)
model.load_state_dict(ckpt['state_dict'])
initial_state_dict = model.state_dict()
model = model.to(0)
model = model.eval()
'''Initialize the trained Zephyr model'''
if DATASET_NAME == 'lmo':
CKPT_PATH = os.path.join(OSSID_CKPT_ROOT, "final_lmo.ckpt") # The path to the checkpoint
USE_ICP = False # Not using ICP for LMO dataset, as it only uses PPF hypotheses, which are already after ICP processing.
MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "lmo", "model_cloud_{:02d}.npz") # path template to the sampled point cloud
INCONST_RATIO_TH = 100
elif DATASET_NAME == 'ycbv':
if main_args.test_seen:
CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
else:
CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
USE_ICP = True # using ICP for LMO dataset
MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "ycbv", "model_cloud_{:02d}.npz") # path template to the sampled point cloud
INCONST_RATIO_TH = 10
'''Set up the arguments for the model'''
parser = getOptions()
zephyr_args = parser.parse_args([])
# Model-related
zephyr_args.model_name = "pn2"
zephyr_args.dataset = "HSVD_diff_uv_norm"
zephyr_args.no_valid_proj = True
zephyr_args.no_valid_depth = True
zephyr_args.inconst_ratio_th = INCONST_RATIO_TH
# Dataset-related
zephyr_args.dataset_root = [""]
zephyr_args.dataset_name = [DATASET_NAME]
# zephyr_args.resume_path = CKPT_PATH
zephyr_args.test_dataset = True
'''Initialize pytorch dataloader and model'''
# dataloader is only needed for the getPointNetData() function
# zephyr_loader = getDataloader(zephyr_args)[0]
zephyr_dataset = ScoreDataset([], "", DATASET_NAME, zephyr_args, mode='test')
zephyr_args.dim_point = zephyr_dataset.dim_point
zephyr_args.unseen_oids = []
zephyr_args.extra_bottleneck_dim = 0
if main_args.dataset_name == "ycbv":
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH_FOR_ODD)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
zephyr_model_for_odd = zephyr_model
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH_FOR_EVEN)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
zephyr_model_for_even = zephyr_model
else:
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
'''Initialize the BOP dataset'''
# Set up the options
bop_args = BopDatasetArgs(
bop_root=BOP_DATASETS_ROOT,
dataset_name=DATASET_NAME,
model_type=None,
split_name="bop_test", # This indicates we want to use the testing set defined in BOP challenge (different than original test set)
split="test",
split_type=None,
ppf_results_file=None,
skip=1, # Iterate over all test samples, with no skipping
)
bop_dataset = BopDataset(bop_args)
print("Length of the test dataset:", len(bop_dataset))
'''Load the zephyr results'''
zephyr_results = pickle.load(open(ossid_args.dataset.zephyr_result_path, 'rb'))
zephyr_results = {(r['obj_id'], r['scene_id'], r['im_id']):r for r in zephyr_results}
# Extract the training dataset from the training loader
train_dtoid_bop_dataset = train_loader.dataset
train_dtoid_bop_dataset.clearTargets()
# Recover from the training/validation split on zephyr results
train_dtoid_bop_dataset.zephyr_results = zephyr_results
'''optimizer for dtoid model'''
optimizer = torch.optim.Adam(
model.parameters(),
lr = 1e-4,
weight_decay = 1e-6,
amsgrad = True
)
'''Test the DTOID model before finetuning'''
if main_args.raw_dtoid:
print("Testing the DTOID model before finetuning")
test_results = testDtoidModel(model, test_loader)
save_path = os.path.join(SAVE_ROOT, "before_finetune_dtoid_results_%s.pkl" % main_args.exp_name)
print("Saving results to", save_path)
pickle.dump({
"test_results": test_results,
"main_args": main_args,
}, open(save_path, 'wb'))
df = pd.DataFrame.from_dict(test_results)
print("DTOID mean IoU:", df['dtoid_iou'].mean())
print("DTOID Valid IoU recall", (df['dtoid_iou'] > 0.5).astype(float).mean())
return 0
if main_args.use_sift_hypos:
# Initialize the featured model for YCB-V dataset
featured_objects = getFeaturizedModels(bop_dataset)
'''main loop'''
test_results = []
finetune_logs = []
renderers = {}
# Create the surface model (PPF training stage)
print("Creating PPF models using Halcon")
ppf_models = {}
for obj_id in bop_dataset.obj_ids:
full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)
if DATASET_NAME == 'ycbv':
ppf_models[obj_id] = PPFModel(full_model_path, ModelSamplingDist = 0.03)
else:
ppf_models[obj_id] = PPFModel(full_model_path)
# Preloading all model data
print("Preloading all model data")
model_data_all = {}
for obj_id in bop_dataset.obj_ids:
# Load the information of the model point cloud from the pre-processed dataset
model_data_path = MODEL_DATA_TPATH.format(obj_id)
model_data = np.load(model_data_path)
model_points, model_colors, model_normals = model_data['model_points'], model_data['model_colors'], model_data['model_normals']
model_data_all[obj_id] = (model_points, model_colors, model_normals)
# The batch is the data for dtoid dataset
for iteration, batch in tqdm(enumerate(test_loader), total=len(test_loader)):
obj_id, scene_id, im_id = batch['obj_id'].item(), batch['scene_id'].item(), batch['im_id'].item()
zr = zephyr_results[(obj_id, scene_id, im_id)]
# Get the full mesh model provided by LineMOD dataset
full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)
# Get the raw data from the bop dataset, preparing for zephyr inference
bop_data = bop_dataset.getDataByIds(obj_id, scene_id, im_id)
# Extract the data from the bop datapoint
img, depth, scene_camera = bop_data['img'], bop_data['depth'], bop_data['scene_camera']
scene_meta = bop_data['scene_meta']
mat_gt = bop_data['mat_gt']
cam_K = np.asarray(scene_camera['cam_K']).reshape((3, 3))
# Load the information of the model point cloud from the pre-processed dataset
model_points, model_colors, model_normals = model_data_all[obj_id]
# Get the proper error function according to whether the object is symmetric or not
is_sym = obj_id in bop_dataset.sym_obj_ids
if main_args.fast:
err_func = add
else:
err_func = adi if is_sym else add
# DTOID inference first
dict_to(batch, 0)
with torch.no_grad():
model = model.eval()
t1 = time.time()
out = model.forwardTestTime(batch)
time_dtoid = time.time() - t1
final_bbox = to_np(out['final_bbox'][0])
final_score = to_np(out['final_score'][0])
dtoid_iou = to_np(out['seg_IoU'])
dtoid_pred_mask = to_np(out['segmentation'][0,0])
dtoid_confident = final_score[0] > DTOID_CONFIDENT_THRESHOLD
use_dtoid_mask = False
if main_args.ignore_dtoid_mask:
use_dtoid_mask = False
elif main_args.always_dtoid_mask:
use_dtoid_mask = True
else:
use_dtoid_mask = dtoid_confident
if iteration < main_args.finetune_warmup:
use_dtoid_mask = False
if not use_dtoid_mask:
# Run zephyr on the whole image
# Here we just get the stored zephyr results
zephyr_score = zr['score']
zephyr_mask = zr['pred_mask_visib']
zephyr_pose = zr['pred_pose']
pred_pose = to_np(zephyr_pose)
pred_score = zephyr_score
time_ppf = None
time_sift = None
time_zephyr = None
time_icp = None
else:
# Take the prediction and run zephyr on the predicted mask
# Get the mask according to dtoid | |
from __future__ import unicode_literals
import json
import datetime
import pytz
import requests
from dateutil.relativedelta import relativedelta
from django.contrib.gis.db.models.fields import PointField
from django.contrib.gis.db.models.manager import GeoManager
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.measure import Distance
from django.db import models,transaction
from django.contrib.gis.db import models as gis_models
from django.db.models import Q
from django.dispatch import receiver
from django.db.models.signals import pre_delete, post_save
from django.utils.encoding import python_2_unicode_compatible
from django.core.exceptions import ValidationError
from django.contrib.postgres.fields.jsonb import JSONField
from django.utils import timezone
from ledger.checkout.utils import createCustomBasket
from ledger.payments.invoice.utils import CreateInvoiceBasket
from ledger.settings_base import TIME_ZONE
from rest_framework import serializers
from taggit.models import TaggedItemBase
from ledger.accounts.models import EmailUser, RevisionedMixin
from ledger.payments.models import Invoice
from disturbance import exceptions
# from disturbance.components.approvals.models import ApiarySiteOnApproval
from disturbance.components.organisations.models import Organisation
from disturbance.components.main.models import CommunicationsLogEntry, UserAction, Document, Region, District, \
ApplicationType, RegionDbca, DistrictDbca, CategoryDbca
from disturbance.components.main.utils import get_department_user
from disturbance.components.proposals.email import (
send_referral_email_notification,
send_apiary_referral_email_notification,
send_apiary_referral_complete_email_notification,
send_proposal_decline_email_notification,
send_proposal_approval_email_notification,
send_amendment_email_notification,
send_submit_email_notification,
send_external_submit_email_notification,
send_approver_decline_email_notification,
send_approver_approve_email_notification,
send_referral_complete_email_notification,
send_proposal_approver_sendback_email_notification,
send_referral_recall_email_notification,
send_site_transfer_approval_email_notification,
)
from disturbance.ordered_model import OrderedModel
import copy
import subprocess
import logging
from disturbance.settings import SITE_STATUS_DRAFT, SITE_STATUS_PENDING, SITE_STATUS_APPROVED, SITE_STATUS_DENIED, \
SITE_STATUS_CURRENT, RESTRICTED_RADIUS, SITE_STATUS_TRANSFERRED, PAYMENT_SYSTEM_ID, PAYMENT_SYSTEM_PREFIX
logger = logging.getLogger(__name__)
def update_proposal_doc_filename(instance, filename):
    """Upload path for a ProposalDocument: proposals/<proposal id>/documents/<filename>."""
    return 'proposals/%s/documents/%s' % (instance.proposal.id, filename)
def update_proposal_comms_log_filename(instance, filename):
    """Upload path for a comms-log document: proposals/<proposal id>/communications/<entry id>/<filename>."""
    return 'proposals/%s/communications/%s/%s' % (
        instance.log_entry.proposal.id, instance.id, filename)
def update_amendment_request_doc_filename(instance, filename):
    """Upload path for an amendment-request document: proposals/<proposal id>/amendment_request_documents/<filename>."""
    return 'proposals/%s/amendment_request_documents/%s' % (
        instance.amendment_request.proposal.id, filename)
def update_apiary_doc_filename(instance, filename):
    """Upload path for an apiary document: proposals/<proposal id>/apiary_documents/<filename>.

    NOTE(review): unlike the sibling helpers this walks ``instance.apiary_documents``
    (plural) — presumably the related-object accessor name; confirm against the model.
    """
    return 'proposals/%s/apiary_documents/%s' % (
        instance.apiary_documents.proposal.id, filename)
#def update_temporary_use_doc_filename(instance, filename):
# return 'proposals/{}/apiary_temporary_use_documents/{}'.format(instance.apiary_temporary_use.proposal.id, filename)
#
#def update_site_transfer_doc_filename(instance, filename):
# return 'proposals/{}/apiary_site_transfer_documents/{}'.format(instance.apiary_site_transfer.proposal.id, filename)
def application_type_choicelist():
    """Build Django field choices (name, name) from all ApplicationType rows.

    Returns
    -------
    list of tuple, or tuple of tuple
        One (name, name) pair per ApplicationType; a single default
        ('Disturbance', 'Disturbance') choice when the lookup fails.

    The fallback is required because on the very first DB table creation
    there are no ApplicationType objects (nor even the table), so any
    query raises.  The original bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    try:
        return [((choice.name), (choice.name)) for choice in ApplicationType.objects.all()]
    except Exception:
        # Default value used until the ApplicationType table exists.
        return (('Disturbance', 'Disturbance'),)
class ProposalType(models.Model):
    """Versioned application schema for a named application type (e.g. Disturbance, Apiary).

    Each (name, version) pair is unique; a newer version can point back at
    the row it supersedes via ``replaced_by``.
    """
    description = models.CharField(max_length=256, blank=True, null=True)
    # NOTE: choices is evaluated once at import time via
    # application_type_choicelist(), not per-request.
    name = models.CharField(verbose_name='Application name (eg. Disturbance, Apiary)', max_length=64, choices=application_type_choicelist(), default='Disturbance')
    # The form schema rendered for this application type/version.
    schema = JSONField()
    # The ProposalType that supersedes this one, if any.
    replaced_by = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True)
    version = models.SmallIntegerField(default=1, blank=False, null=False)
    # domain_used = models.CharField(max_length=40, choices=DOMAIN_USED_CHOICES, default=DOMAIN_USED_CHOICES[0][0])
    def __str__(self):
        return '{} - v{}'.format(self.name, self.version)
    class Meta:
        app_label = 'disturbance'
        unique_together = ('name', 'version')
class TaggedProposalAssessorGroupRegions(TaggedItemBase):
    """django-taggit through model linking tags (per the name, region tags) to ProposalAssessorGroup."""
    content_object = models.ForeignKey("ProposalAssessorGroup")
    class Meta:
        app_label = 'disturbance'
class TaggedProposalAssessorGroupActivities(TaggedItemBase):
    """django-taggit through model linking tags (per the name, activity tags) to ProposalAssessorGroup."""
    content_object = models.ForeignKey("ProposalAssessorGroup")
    class Meta:
        app_label = 'disturbance'
class ProposalAssessorGroup(models.Model):
    """Group of users who assess proposals, optionally scoped to one region.

    Exactly one group may be the ``default`` (region-less) group; all other
    groups must have a region (enforced in clean()).
    """
    name = models.CharField(max_length=255)
    #members = models.ManyToManyField(EmailUser,blank=True)
    #regions = TaggableManager(verbose_name="Regions",help_text="A comma-separated list of regions.",through=TaggedProposalAssessorGroupRegions,related_name = "+",blank=True)
    #activities = TaggableManager(verbose_name="Activities",help_text="A comma-separated list of activities.",through=TaggedProposalAssessorGroupActivities,related_name = "+",blank=True)
    members = models.ManyToManyField(EmailUser)
    region = models.ForeignKey(Region, null=True, blank=True)
    # True for the single fallback group that has no region.
    default = models.BooleanField(default=False)
    class Meta:
        app_label = 'disturbance'
    def __str__(self):
        return self.name
    def clean(self):
        """Model validation: non-default groups need a region; only one default group.

        NOTE(review): ``objects.get(default=True)`` raises MultipleObjectsReturned
        if more than one default row already exists — presumably prevented by this
        very validation; confirm.
        """
        try:
            default = ProposalAssessorGroup.objects.get(default=True)
        except ProposalAssessorGroup.DoesNotExist:
            default = None
        if self.pk:
            # Existing group: must have a region unless it is the default group.
            if not self.default and not self.region:
                raise ValidationError('Only default can have no region set for proposal assessor group. Please specifiy region')
        # elif default and not self.default:
        #     raise ValidationError('There can only be one default proposal assessor group')
        else:
            # New group: refuse a second default.
            if default and self.default:
                raise ValidationError('There can only be one default proposal assessor group')
    def member_is_assigned(self,member):
        # True if *member* is the assigned officer of any currently assessable proposal.
        for p in self.current_proposals:
            if p.assigned_officer == member:
                return True
        return False
    @property
    def current_proposals(self):
        # All proposals in a state this group can act on (not filtered by region here).
        assessable_states = ['with_assessor','with_referral','with_assessor_requirements']
        return Proposal.objects.filter(processing_status__in=assessable_states)
    @property
    def members_email(self):
        # Email addresses of every member of this group.
        return [i.email for i in self.members.all()]
class TaggedProposalApproverGroupRegions(TaggedItemBase):
    """django-taggit through model linking tags (per the name, region tags) to ProposalApproverGroup."""
    content_object = models.ForeignKey("ProposalApproverGroup")
    class Meta:
        app_label = 'disturbance'
class TaggedProposalApproverGroupActivities(TaggedItemBase):
    """django-taggit through model linking tags (per the name, activity tags) to ProposalApproverGroup."""
    content_object = models.ForeignKey("ProposalApproverGroup")
    class Meta:
        app_label = 'disturbance'
class ProposalApproverGroup(models.Model):
    """Group of users who approve proposals, optionally scoped to one region.

    Mirrors ProposalAssessorGroup: exactly one region-less ``default`` group
    is allowed; all others must have a region (enforced in clean()).
    """
    name = models.CharField(max_length=255)
    #members = models.ManyToManyField(EmailUser,blank=True)
    #regions = TaggableManager(verbose_name="Regions",help_text="A comma-separated list of regions.",through=TaggedProposalApproverGroupRegions,related_name = "+",blank=True)
    #activities = TaggableManager(verbose_name="Activities",help_text="A comma-separated list of activities.",through=TaggedProposalApproverGroupActivities,related_name = "+",blank=True)
    members = models.ManyToManyField(EmailUser)
    region = models.ForeignKey(Region, null=True, blank=True)
    # True for the single fallback group that has no region.
    default = models.BooleanField(default=False)
    class Meta:
        app_label = 'disturbance'
    def __str__(self):
        return self.name
    def clean(self):
        """Model validation: non-default groups need a region; only one default group.

        NOTE(review): ``objects.get(default=True)`` raises MultipleObjectsReturned
        if more than one default row already exists — confirm this cannot occur.
        """
        try:
            default = ProposalApproverGroup.objects.get(default=True)
        except ProposalApproverGroup.DoesNotExist:
            default = None
        if self.pk:
            # Existing group: must have a region unless it is the default group.
            if not self.default and not self.region:
                raise ValidationError('Only default can have no region set for proposal assessor group. Please specifiy region')
        # if int(self.pk) != int(default.id):
        #     if default and self.default:
        #         raise ValidationError('There can only be one default proposal approver group')
        else:
            # New group: refuse a second default.
            if default and self.default:
                raise ValidationError('There can only be one default proposal approver group')
    def member_is_assigned(self,member):
        # True if *member* is the assigned approver of any proposal awaiting approval.
        for p in self.current_proposals:
            if p.assigned_approver == member:
                return True
        return False
    @property
    def current_proposals(self):
        # All proposals currently with an approver (not filtered by region here).
        assessable_states = ['with_approver']
        return Proposal.objects.filter(processing_status__in=assessable_states)
    @property
    def members_email(self):
        # Email addresses of every member of this group.
        return [i.email for i in self.members.all()]
class DefaultDocument(Document):
    """Abstract Document base with soft-delete protection.

    Once ``can_delete`` is False, delete() only logs and leaves the row
    (and the file on disk) untouched, preserving submission history.
    """
    input_name = models.CharField(max_length=255,null=True,blank=True)
    can_delete = models.BooleanField(default=True) # after initial submit prevent document from being deleted
    visible = models.BooleanField(default=True) # to prevent deletion on file system, hidden and still be available in history
    class Meta:
        app_label = 'disturbance'
        abstract =True
    def delete(self):
        """Delete only while still deletable; otherwise log and do nothing (returns None)."""
        if self.can_delete:
            return super(DefaultDocument, self).delete()
        logger.info('Cannot delete existing document object after Application has been submitted (including document submitted before Application pushback to status Draft): {}'.format(self.name))
class ProposalDocument(Document):
    """Document attached to a Proposal, with soft-delete/hide protection after submission."""
    proposal = models.ForeignKey('Proposal',related_name='documents')
    _file = models.FileField(upload_to=update_proposal_doc_filename, max_length=500)
    input_name = models.CharField(max_length=255,null=True,blank=True)
    can_delete = models.BooleanField(default=True) # after initial submit prevent document from being deleted
    can_hide= models.BooleanField(default=False) # after initial submit, document cannot be deleted but can be hidden
    hidden=models.BooleanField(default=False) # after initial submit prevent document from being deleted
    def delete(self):
        """Delete only while still deletable; otherwise log and do nothing (returns None)."""
        if self.can_delete:
            return super(ProposalDocument, self).delete()
        logger.info('Cannot delete existing document object after Proposal has been submitted (including document submitted before Proposal pushback to status Draft): {}'.format(self.name))
    class Meta:
        app_label = 'disturbance'
class Proposal(RevisionedMixin):
CUSTOMER_STATUS_TEMP = 'temp'
CUSTOMER_STATUS_DRAFT = 'draft'
CUSTOMER_STATUS_WITH_ASSESSOR = 'with_assessor'
CUSTOMER_STATUS_AMENDMENT_REQUEST = 'amendment_required'
CUSTOMER_STATUS_APPROVED = 'approved'
CUSTOMER_STATUS_DECLINED = 'declined'
CUSTOMER_STATUS_DISCARDED = 'discarded'
CUSTOMER_STATUS_CHOICES = ((CUSTOMER_STATUS_TEMP, 'Temporary'),
(CUSTOMER_STATUS_DRAFT, 'Draft'),
(CUSTOMER_STATUS_WITH_ASSESSOR, 'Under Review'),
(CUSTOMER_STATUS_AMENDMENT_REQUEST, 'Amendment Required'),
(CUSTOMER_STATUS_APPROVED, 'Approved'),
(CUSTOMER_STATUS_DECLINED, 'Declined'),
(CUSTOMER_STATUS_DISCARDED, 'Discarded'),
)
# List of statuses from above that allow a customer to edit an application.
CUSTOMER_EDITABLE_STATE = [CUSTOMER_STATUS_TEMP, CUSTOMER_STATUS_DRAFT, CUSTOMER_STATUS_AMENDMENT_REQUEST, ]
APPLICANT_TYPE_ORGANISATION = 'organisation'
APPLICANT_TYPE_PROXY = 'proxy' # proxy also represents an individual making an Apiary application
APPLICANT_TYPE_SUBMITTER = 'submitter'
# List of statuses from above that allow a customer to view an application (read-only)
CUSTOMER_VIEWABLE_STATE = ['with_assessor', 'under_review', 'id_required', 'returns_required', 'approved', 'declined']
PROCESSING_STATUS_TEMP = 'temp'
PROCESSING_STATUS_DRAFT = 'draft'
PROCESSING_STATUS_WITH_ASSESSOR = 'with_assessor'
PROCESSING_STATUS_WITH_REFERRAL = 'with_referral'
PROCESSING_STATUS_WITH_ASSESSOR_REQUIREMENTS = 'with_assessor_requirements'
PROCESSING_STATUS_WITH_APPROVER = 'with_approver'
PROCESSING_STATUS_RENEWAL = 'renewal'
PROCESSING_STATUS_LICENCE_AMENDMENT = 'licence_amendment'
PROCESSING_STATUS_AWAITING_APPLICANT_RESPONSE = 'awaiting_applicant_response'
PROCESSING_STATUS_AWAITING_ASSESSOR_RESPONSE = 'awaiting_assessor_response'
PROCESSING_STATUS_AWAITING_RESPONSES = 'awaiting_responses'
PROCESSING_STATUS_READY_FOR_CONDITIONS = 'ready_for_conditions'
PROCESSING_STATUS_READY_TO_ISSUE = 'ready_to_issue'
PROCESSING_STATUS_APPROVED = 'approved'
PROCESSING_STATUS_DECLINED = 'declined'
PROCESSING_STATUS_DISCARDED = 'discarded'
PROCESSING_STATUS_CHOICES = ((PROCESSING_STATUS_TEMP, 'Temporary'),
(PROCESSING_STATUS_DRAFT, 'Draft'),
(PROCESSING_STATUS_WITH_ASSESSOR, 'With Assessor'),
(PROCESSING_STATUS_WITH_REFERRAL, 'With Referral'),
(PROCESSING_STATUS_WITH_ASSESSOR_REQUIREMENTS, 'With Assessor (Requirements)'),
(PROCESSING_STATUS_WITH_APPROVER, 'With Approver'),
(PROCESSING_STATUS_RENEWAL, 'Renewal'),
(PROCESSING_STATUS_LICENCE_AMENDMENT, 'Licence Amendment'),
(PROCESSING_STATUS_AWAITING_APPLICANT_RESPONSE, 'Awaiting Applicant Response'),
(PROCESSING_STATUS_AWAITING_ASSESSOR_RESPONSE, 'Awaiting Assessor Response'),
(PROCESSING_STATUS_AWAITING_RESPONSES, 'Awaiting Responses'),
(PROCESSING_STATUS_READY_FOR_CONDITIONS, 'Ready for Conditions'),
(PROCESSING_STATUS_READY_TO_ISSUE, 'Ready to Issue'),
(PROCESSING_STATUS_APPROVED, 'Approved'),
(PROCESSING_STATUS_DECLINED, 'Declined'),
(PROCESSING_STATUS_DISCARDED, 'Discarded'),
)
ID_CHECK_STATUS_CHOICES = (('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'),
('updated', 'Updated'), ('accepted', 'Accepted'))
COMPLIANCE_CHECK_STATUS_CHOICES = (
('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'),
('accepted', 'Accepted'))
CHARACTER_CHECK_STATUS_CHOICES = (
('not_checked', 'Not Checked'), ('accepted', 'Accepted'))
REVIEW_STATUS_CHOICES = (
('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'),
('accepted', 'Accepted'))
# PROPOSAL_STATE_NEW_LICENCE = 'New Licence'
# PROPOSAL_STATE_AMENDMENT = 'Amendment'
# PROPOSAL_STATE_RENEWAL = 'Renewal'
# PROPOSAL_STATE_CHOICES = (
# (1, PROPOSAL_STATE_NEW_LICENCE),
# (2, PROPOSAL_STATE_AMENDMENT),
# (3, PROPOSAL_STATE_RENEWAL),
# )
APPLICATION_TYPE_CHOICES = (
('new_proposal', 'New Proposal'),
('amendment', 'Amendment'),
('renewal', 'Renewal'),
)
proposal_type = models.CharField('Proposal Type', max_length=40, choices=APPLICATION_TYPE_CHOICES,
default=APPLICATION_TYPE_CHOICES[0][0])
#proposal_state = models.PositiveSmallIntegerField('Proposal state', choices=PROPOSAL_STATE_CHOICES, default=1)
data = JSONField(blank=True, null=True)
assessor_data = JSONField(blank=True, null=True)
comment_data = JSONField(blank=True, null=True)
schema = JSONField(blank=False, null=False)
proposed_issuance_approval = JSONField(blank=True, null=True)
#hard_copy = models.ForeignKey(Document, blank=True, null=True, related_name='hard_copy')
customer_status = models.CharField('Customer Status', max_length=40, choices=CUSTOMER_STATUS_CHOICES,
default=CUSTOMER_STATUS_CHOICES[1][0])
applicant = models.ForeignKey(Organisation, blank=True, null=True, related_name='proposals')
lodgement_number = models.CharField(max_length=9, blank=True, default='')
lodgement_sequence = models.IntegerField(blank=True, default=0)
#lodgement_date = models.DateField(blank=True, null=True)
lodgement_date = models.DateTimeField(blank=True, null=True)
# 20200512 - proxy_applicant also represents an individual making an Apiary application
proxy_applicant = models.ForeignKey(EmailUser, blank=True, null=True, related_name='disturbance_proxy')
submitter = models.ForeignKey(EmailUser, blank=True, null=True, related_name='disturbance_proposals')
assigned_officer = models.ForeignKey(EmailUser, blank=True, null=True, related_name='disturbance_proposals_assigned', on_delete=models.SET_NULL)
assigned_approver = models.ForeignKey(EmailUser, blank=True, null=True, related_name='disturbance_proposals_approvals', on_delete=models.SET_NULL)
processing_status = models.CharField('Processing Status', max_length=30, choices=PROCESSING_STATUS_CHOICES,
default=PROCESSING_STATUS_CHOICES[1][0])
id_check_status = models.CharField('Identification Check Status', max_length=30, choices=ID_CHECK_STATUS_CHOICES,
default=ID_CHECK_STATUS_CHOICES[0][0])
compliance_check_status = models.CharField('Return Check Status', max_length=30, choices=COMPLIANCE_CHECK_STATUS_CHOICES,
default=COMPLIANCE_CHECK_STATUS_CHOICES[0][0])
character_check_status = models.CharField('Character Check Status', max_length=30,
choices=CHARACTER_CHECK_STATUS_CHOICES,
default=CHARACTER_CHECK_STATUS_CHOICES[0][0])
review_status = models.CharField('Review Status', max_length=30, choices=REVIEW_STATUS_CHOICES,
default=REVIEW_STATUS_CHOICES[0][0])
approval = models.ForeignKey('disturbance.Approval',null=True,blank=True)
previous_application = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True)
#self_clone = models.ForeignKey('self', on_delete=models.SET_NULL, blank=True, null=True, related_name='proposal_current_state')
proposed_decline_status = models.BooleanField(default=False)
# Special Fields
title = models.CharField(max_length=255,null=True,blank=True)
activity = models.CharField(max_length=255,null=True,blank=True)
#region = models.CharField(max_length=255,null=True,blank=True)
tenure = models.CharField(max_length=255,null=True,blank=True)
#activity = models.ForeignKey(Activity, null=True, blank=True)
region = models.ForeignKey(Region, null=True, blank=True)
district = models.ForeignKey(District, null=True, blank=True)
#tenure = models.ForeignKey(Tenure, null=True, blank=True)
application_type = models.ForeignKey(ApplicationType)
approval_level = models.CharField('Activity matrix approval level', max_length=255,null=True,blank=True)
approval_level_document = models.ForeignKey(ProposalDocument, blank=True, null=True, related_name='approval_level_document')
approval_level_comment = models.TextField(blank=True)
approval_comment = models.TextField(blank=True)
assessment_reminder_sent = models.BooleanField(default=False)
weekly_reminder_sent_date = models.DateField(blank=True, null=True)
sub_activity_level1 = models.CharField(max_length=255,null=True,blank=True)
sub_activity_level2 = models.CharField(max_length=255,null=True,blank=True)
management_area = models.CharField(max_length=255,null=True,blank=True)
fee_invoice_reference = models.CharField(max_length=50, null=True, blank=True, default='')
migrated = models.BooleanField(default=False)
class Meta:
app_label = 'disturbance'
#ordering = ['-id']
def __str__(self):
return str(self.id)
#Append 'P' to Proposal id to generate Lodgement number. Lodgement number and lodgement sequence are used to generate | |
<reponame>bogdankostic/flair<gh_stars>1-10
import time, datetime
import random
import sys
from pathlib import Path
from typing import Union
from torch import cuda
from torch.utils.data import Dataset, DataLoader
from torch.optim.sgd import SGD
try:
from apex import amp
except ImportError:
amp = None
import flair
from flair.data import Dictionary
from flair.models import LanguageModel
from flair.optim import *
from flair.training_utils import add_file_handler
log = logging.getLogger("flair")
class TextDataset(Dataset):
    """Dataset over one text file or a directory of text files.

    Each item corresponds to one file. ``__getitem__`` reads the file, splits
    it into documents on ``document_delimiter``, optionally random-case-flips
    each document, tokenizes on characters (or whitespace tokens), and returns
    a 1-D ``LongTensor`` of dictionary indices (reversed when ``forward`` is
    False).
    """

    def __init__(
        self,
        path: Union[str, Path],
        dictionary: Dictionary,
        expand_vocab: bool = False,
        forward: bool = True,
        split_on_char: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = '\n',
        shuffle: bool = True,
    ):
        # isinstance (rather than `type(...) is str`) also accepts str subclasses
        if isinstance(path, str):
            path = Path(path)
        assert path.exists()

        self.path = path
        self.dictionary = dictionary
        self.split_on_char = split_on_char
        self.forward = forward
        self.random_case_flip = random_case_flip
        self.expand_vocab = expand_vocab
        self.document_delimiter = document_delimiter
        self.shuffle = shuffle

        # a directory yields one dataset item per file; a single file is one item
        if path.is_dir():
            self.files = sorted([f for f in path.iterdir() if f.exists()])
        else:
            self.files = [path]

    def __len__(self):
        # number of files, not number of lines/tokens
        return len(self.files)

    def __getitem__(self, index=0) -> torch.tensor:
        """Tokenizes a text file on character basis."""
        if isinstance(self.files[index], str):
            self.files[index] = Path(self.files[index])
        assert self.files[index].exists()

        with self.files[index].open("r", encoding="utf-8") as fin:
            # split into documents and re-attach the delimiter to each one
            lines = (doc + self.document_delimiter for doc in fin.read().split(self.document_delimiter) if doc)
            if self.random_case_flip:
                lines = map(self.random_casechange, lines)
            lines = list(map(list if self.split_on_char else str.split, lines))

        # lazy %-formatting: message only built when INFO is enabled
        log.info("read text file with %d lines", len(lines))

        if self.shuffle:
            random.shuffle(lines)
            log.info("shuffled")

        if self.expand_vocab:
            for chars in lines:
                for char in chars:
                    self.dictionary.add_item(char)

        ids = torch.tensor(
            [self.dictionary.get_idx_for_item(char) for chars in lines for char in chars],
            dtype=torch.long
        )
        # a backward LM is trained on the reversed token stream
        if not self.forward:
            ids = ids.flip(0)
        return ids

    @staticmethod
    def random_casechange(line: str) -> str:
        """Lowercase the line with 1% probability, uppercase with 1%, else unchanged."""
        no = random.randint(0, 99)
        if no == 0:
            line = line.lower()
        if no == 1:
            line = line.upper()
        return line
class TextCorpus(object):
    """Bundle of the train/valid/test splits of a language-model corpus.

    ``train`` stays a lazy TextDataset (one item per split file); ``valid``
    and ``test`` are single files, so their only item is materialized eagerly.
    """

    def __init__(
        self,
        path: Union[Path, str],
        dictionary: Dictionary,
        forward: bool = True,
        character_level: bool = True,
        random_case_flip: bool = True,
        document_delimiter: str = '\n',
    ):
        self.dictionary: Dictionary = dictionary
        self.forward = forward
        self.split_on_char = character_level
        self.random_case_flip = random_case_flip
        self.document_delimiter: str = document_delimiter

        if type(path) == str:
            path = Path(path)

        def _dataset(split_name, shuffle):
            # expand_vocab is always False here: the dictionary is fixed
            return TextDataset(
                path / split_name,
                dictionary,
                False,
                self.forward,
                self.split_on_char,
                self.random_case_flip,
                document_delimiter=self.document_delimiter,
                shuffle=shuffle,
            )

        self.train = _dataset("train", True)
        # TextDataset returns a list. valid and test are only one file, so return the first element
        self.valid = _dataset("valid.txt", False)[0]
        self.test = _dataset("test.txt", False)[0]
class LanguageModelTrainer:
def __init__(
    self,
    model: LanguageModel,
    corpus: TextCorpus,
    optimizer: Optimizer = SGD,
    test_mode: bool = False,
    epoch: int = 0,
    split: int = 0,
    loss: float = 10000,
    optimizer_state: dict = None,
):
    """Set up a trainer for *model* over *corpus*.

    ``epoch``, ``split``, ``loss`` and ``optimizer_state`` allow resuming
    from a saved checkpoint.
    """
    # core collaborators
    self.model: LanguageModel = model
    self.corpus: TextCorpus = corpus
    self.optimizer: Optimizer = optimizer
    self.optimizer_state = optimizer_state

    # resume state (defaults mean "train from scratch")
    self.epoch = epoch
    self.split = split
    self.loss = loss
    self.test_mode: bool = test_mode

    # fixed training internals
    self.loss_function = torch.nn.CrossEntropyLoss()
    self.log_interval = 100
def train(
    self,
    base_path: Union[Path, str],
    sequence_length: int,
    learning_rate: float = 20,
    mini_batch_size: int = 100,
    anneal_factor: float = 0.25,
    patience: int = 10,
    clip=0.25,
    max_epochs: int = 1000,
    checkpoint: bool = False,
    grow_to_sequence_length: int = 0,
    num_workers: int = 2,
    use_amp: bool = False,
    amp_opt_level: str = "O1",
    **kwargs,
):
    """Train the language model, validating after every corpus split.

    :param base_path: directory for logs, checkpoints and the best model
    :param sequence_length: BPTT window length (may grow; see grow_to_sequence_length)
    :param learning_rate: initial optimizer learning rate
    :param mini_batch_size: number of parallel column-sequences per batch
    :param anneal_factor: multiplier the plateau scheduler applies to the LR
    :param patience: scheduler patience in validation steps
    :param clip: max gradient norm for clipping
    :param max_epochs: maximum passes over the whole corpus
    :param checkpoint: if True, write a rolling checkpoint after every split
    :param grow_to_sequence_length: if > 0, grow sequence_length by 1 per split up to this value
    :param num_workers: DataLoader workers reading the training split files
    :param kwargs: forwarded to the optimizer constructor
    :raises RuntimeError: if AMP is requested but unavailable, or validation data is too small
    """
    if use_amp:
        if sys.version_info < (3, 0):
            raise RuntimeError("Apex currently only supports Python 3. Aborting.")
        if amp is None:
            raise RuntimeError(
                "Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
                "to enable mixed-precision training."
            )

    # cast string to Path
    if type(base_path) is str:
        base_path = Path(base_path)

    add_file_handler(log, base_path / "training.log")

    # one split per training file (TextDataset yields one item per file)
    number_of_splits: int = len(self.corpus.train)

    val_data = self._batchify(self.corpus.valid, mini_batch_size)

    # error message if the validation dataset is too small
    if val_data.size(0) == 1:
        raise RuntimeError(
            f"ERROR: Your validation dataset is too small. For your mini_batch_size, the data needs to "
            f"consist of at least {mini_batch_size * 2} characters!"
        )

    base_path.mkdir(parents=True, exist_ok=True)
    loss_txt = base_path / "loss.txt"
    savefile = base_path / "best-lm.pt"

    try:
        # self.loss holds the best loss seen so far when resuming from checkpoint
        best_val_loss = self.loss
        optimizer = self.optimizer(
            self.model.parameters(), lr=learning_rate, **kwargs
        )
        if self.optimizer_state is not None:
            optimizer.load_state_dict(self.optimizer_state)

        # weight-decay-aware optimizers need the matching weight-decay scheduler
        if isinstance(optimizer, (AdamW, SGDW)):
            scheduler: ReduceLRWDOnPlateau = ReduceLRWDOnPlateau(
                optimizer, verbose=True, factor=anneal_factor, patience=patience
            )
        else:
            scheduler: ReduceLROnPlateau = ReduceLROnPlateau(
                optimizer, verbose=True, factor=anneal_factor, patience=patience
            )

        if use_amp:
            self.model, optimizer = amp.initialize(
                self.model, optimizer, opt_level=amp_opt_level
            )

        training_generator = DataLoader(
            self.corpus.train, shuffle=False, num_workers=num_workers
        )

        for epoch in range(self.epoch, max_epochs):
            epoch_start_time = time.time()
            # Shuffle training files randomly after serially iterating through corpus once
            if epoch > 0:
                training_generator = DataLoader(
                    self.corpus.train, shuffle=True, num_workers=num_workers
                )
                self.model.save_checkpoint(
                    base_path / f"epoch_{epoch}.pt",
                    optimizer,
                    epoch,
                    0,
                    best_val_loss,
                )

            # iterate through training data, starting at self.split (for checkpointing)
            for curr_split, train_slice in enumerate(
                training_generator, self.split
            ):
                if sequence_length < grow_to_sequence_length:
                    sequence_length += 1
                log.info(f"Sequence length is {sequence_length}")

                split_start_time = time.time()
                # off by one for printing
                curr_split += 1

                train_data = self._batchify(train_slice.flatten(), mini_batch_size)

                log.info(
                    "Split %d" % curr_split
                    + "\t - ({:%H:%M:%S})".format(datetime.datetime.now())
                )

                # read back the current LR (the scheduler may have annealed it)
                for group in optimizer.param_groups:
                    learning_rate = group["lr"]

                # go into train mode
                self.model.train()

                # reset variables
                hidden = self.model.init_hidden(mini_batch_size)

                # vocabulary size; used to flatten the prediction logits for the loss
                ntokens = len(self.corpus.dictionary)

                total_loss = 0
                start_time = time.time()

                for batch, i in enumerate(
                    range(0, train_data.size(0) - 1, sequence_length)
                ):
                    data, targets = self._get_batch(train_data, i, sequence_length)

                    if not data.is_cuda and cuda.is_available():
                        log.info(
                            "Batch %d is not on CUDA, training will be very slow"
                            % (batch)
                        )
                        raise Exception("data isnt on cuda")

                    self.model.zero_grad()
                    optimizer.zero_grad()

                    # do the forward pass in the model
                    output, rnn_output, hidden = self.model.forward(data, hidden)

                    # try to predict the targets
                    loss = self.loss_function(output.view(-1, ntokens), targets)

                    # Backward
                    if use_amp:
                        with amp.scale_loss(loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()

                    # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip)

                    optimizer.step()

                    total_loss += loss.data

                    # We detach the hidden state from how it was previously produced.
                    # If we didn't, the model would try backpropagating all the way to start of the dataset.
                    hidden = self._repackage_hidden(hidden)

                    # explicitly remove loss to clear up memory
                    del loss, output, rnn_output

                    if batch % self.log_interval == 0 and batch > 0:
                        cur_loss = total_loss.item() / self.log_interval
                        elapsed = time.time() - start_time
                        log.info(
                            "| split {:3d} /{:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | "
                            "loss {:5.2f} | ppl {:8.2f}".format(
                                curr_split,
                                number_of_splits,
                                batch,
                                len(train_data) // sequence_length,
                                elapsed * 1000 / self.log_interval,
                                cur_loss,
                                math.exp(cur_loss),
                            )
                        )
                        total_loss = 0
                        start_time = time.time()

                log.info(
                    "%d seconds for train split %d"
                    % (time.time() - split_start_time, curr_split)
                )

                ###############################################################################
                # validate after every split
                self.model.eval()
                val_loss = self.evaluate(val_data, mini_batch_size, sequence_length)
                scheduler.step(val_loss)

                log.info("best loss so far {:5.2f}".format(best_val_loss))
                log.info(self.model.generate_text())

                if checkpoint:
                    self.model.save_checkpoint(
                        base_path / "checkpoint.pt",
                        optimizer,
                        epoch,
                        curr_split,
                        best_val_loss,
                    )

                # Save the model if the validation loss is the best we've seen so far.
                if val_loss < best_val_loss:
                    self.model.best_score = best_val_loss
                    self.model.save(savefile)
                    best_val_loss = val_loss

                ###############################################################################
                # print info
                ###############################################################################
                log.info("-" * 89)
                summary = (
                    "| end of split {:3d} /{:3d} | epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | "
                    "valid ppl {:8.2f} | learning rate {:3.4f}".format(
                        curr_split,
                        number_of_splits,
                        epoch + 1,
                        (time.time() - split_start_time),
                        val_loss,
                        math.exp(val_loss),
                        learning_rate,
                    )
                )
                with open(loss_txt, "a") as myfile:
                    myfile.write("%s\n" % summary)
                log.info(summary)
                log.info("-" * 89)

            log.info("Epoch time: %.2f" % (time.time() - epoch_start_time))

    except KeyboardInterrupt:
        # Ctrl-C stops training but still runs the final test evaluation below
        log.info("-" * 89)
        log.info("Exiting from training early")

    ###############################################################################
    # final testing
    ###############################################################################
    test_data = self._batchify(self.corpus.test, mini_batch_size)
    test_loss = self.evaluate(test_data, mini_batch_size, sequence_length)

    summary = "TEST: valid loss {:5.2f} | valid ppl {:8.2f}".format(
        test_loss, math.exp(test_loss)
    )
    with open(loss_txt, "a") as myfile:
        myfile.write("%s\n" % summary)
    log.info(summary)
    log.info("-" * 89)
def evaluate(self, data_source, eval_batch_size, sequence_length):
    """Return the average per-token loss of the model over *data_source*.

    *data_source* is a batchified (seq_len, batch) tensor as produced by
    ``_batchify``. Runs under ``torch.no_grad()``; no parameters are updated.
    """
    # Turn on evaluation mode which disables dropout.
    self.model.eval()
    with torch.no_grad():
        total_loss = 0
        ntokens = len(self.corpus.dictionary)

        hidden = self.model.init_hidden(eval_batch_size)

        for i in range(0, data_source.size(0) - 1, sequence_length):
            data, targets = self._get_batch(data_source, i, sequence_length)
            prediction, rnn_output, hidden = self.model.forward(data, hidden)
            output_flat = prediction.view(-1, ntokens)
            # weight each window's loss by its length so the mean is per-token
            total_loss += len(data) * self.loss_function(output_flat, targets).data
            # detach hidden state so no graph accumulates across windows
            hidden = self._repackage_hidden(hidden)
        return total_loss.item() / len(data_source)
@staticmethod
def _batchify(data, batch_size):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * batch_size)
# Evenly divide the data across the bsz batches.
data = data.view(batch_size, -1).t().contiguous()
return data
@staticmethod
def | |
"баскыһыанньа 14 балаҕан ыйа 1998", "sunday 14 september 1998"),
# saq
param('saq', "1 lapa le okuni 1980 kun 10:45 tesiran", "1 march 1980 monday 10:45 am"),
param('saq', "mderot ee inet 12 lapa le ong'wan 1824", "wednesday 12 april 1824"),
# sbp
param('sbp', "1 mupalangulwa mulungu 08:15 lwamilawu", "1 january sunday 08:15 am"),
param('sbp', "jtn 17 mokhu 2001", "wednesday 17 october 2001"),
# se
param('se', "láv 22 cuoŋománnu 10:08 iđitbeaivi", "saturday 22 april 10:08 am"),
param('se', "duorasdat 11 borgemánnu 1978 12:09 eb", "thursday 11 august 1978 12:09 pm"),
# seh
param('seh', "12 fevreiro 2005 sha", "12 february 2005 friday"),
param('seh', "chiposi 2 decembro 1987", "monday 2 december 1987"),
# ses
param('ses', "18 žuyye 2009 atalaata 03:12 aluula", "18 july 2009 tuesday 03:12 pm"),
param('ses', "asibti 2 awi 1987", "saturday 2 april 1987"),
# sg
param('sg', "5 ngubùe 1890 bïkua-ûse 12:08 lk", "5 april 1890 monday 12:08 pm"),
param('sg', "bk3 23 föndo 2001", "tuesday 23 june 2001"),
# shi-Latn
param('shi-Latn', "6 bṛayṛ 2014 akṛas 07:06 tifawt", "6 february 2014 wednesday 07:06 am"),
param('shi-Latn', "asamas 15 ɣuct 2045", "sunday 15 august 2045"),
# sk
param('sk', "15 marec 1987 utorok", "15 march 1987 tuesday"),
param('sk', "streda 17 mája 2003", "wednesday 17 may 2003"),
# sl
param('sl', "12 junij 2003 petek 10:09 pop", "12 june 2003 friday 10:09 pm"),
param('sl', "ponedeljek 15 okt 1997 09:07 dopoldne", "monday 15 october 1997 09:07 am"),
# smn
param('smn', "1 njuhčâmáánu 2008 majebaargâ 08:08 ip", "1 march 2008 tuesday 08:08 am"),
param('smn', "láv 23 roovvâd 1897", "saturday 23 october 1897"),
# sn
param('sn', "11 chikumi 1998 chipiri", "11 june 1998 tuesday"),
param('sn', "china 2 mbudzi 1890", "thursday 2 november 1890"),
# so
param('so', "sab 5 bisha saddexaad 1765 11:08 gn", "saturday 5 march 1765 11:08 pm"),
param('so', "16 lit 2008 axd", "16 december 2008 sunday"),
# sq
param('sq', "2 qershor 1997 e mërkurë 10:08 pasdite", "2 june 1997 wednesday 10:08 pm"),
param('sq', "pre 15 gusht 1885 04:54 e paradites", "friday 15 august 1885 04:54 am"),
# sr-Cyrl
param('sr-Cyrl', "16 април 2016 суб 03:46 по подне", "16 april 2016 saturday 03:46 pm"),
param('sr-Cyrl', "уторак 3 новембар 1999", "tuesday 3 november 1999"),
# sr-Latn
param('sr-Latn', "4 septembar 2000 četvrtak", "4 september 2000 thursday"),
param('sr-Latn', "uto 18 maj 2004 11:15 pre podne", "tuesday 18 may 2004 11:15 am"),
# sr
param('sr', "3 децембар 2005 уто 10:15 по подне", "3 december 2005 tuesday 10:15 pm"),
param('sr', "петак 12 август 2001", "friday 12 august 2001"),
# sv
param('sv', "4 augusti 2007 lördag 02:44 fm", "4 august 2007 saturday 02:44 am"),
param('sv', "onsdag 16 mars 08:15 eftermiddag", "wednesday 16 march 08:15 pm"),
# sw
param('sw', "5 mei 1994 jumapili 10:17 asubuhi", "5 may 1994 sunday 10:17 am"),
param('sw', "jumanne 2 desemba 2003", "tuesday 2 december 2003"),
# ta
param('ta', "6 ஏப்ரல் 1997 செவ்வாய் 02:09 முற்பகல்", "6 april 1997 tuesday 02:09 am"),
param('ta', "ஞாயி 1 ஜூன் 1998", "sunday 1 june 1998"),
# te
param('te', "సోమవారం 3 నవంబర 1887", "monday 3 november 1887"),
param('te', "5 మార్చి 2001 శుక్రవారం", "5 march 2001 friday"),
# teo
param('teo', "2 omodok'king'ol 1996 nakaare", "2 june 1996 tuesday"),
param('teo', "nakasabiti 4 jol 2001 01:12 ebongi", "saturday 4 july 2001 01:12 pm"),
# to
param('to', "5 fēpueli 2007 mōn 02:17 efiafi", "5 february 2007 monday 02:17 pm"),
param('to', "falaite 14 'okatopa 2015 09:48 hh", "friday 14 october 2015 09:48 am"),
# twq
param('twq', "17 feewiriye 2023 11:12 zaarikay b", "17 february 2023 11:12 pm"),
param('twq', "alzuma 11 sektanbur 2019", "friday 11 september 2019"),
# tzm
param('tzm', "2 yulyuz 2002 akwas 01:16 ḍeffir aza", "2 july 2002 thursday 01:16 pm"),
param('tzm', "asa 13 nwanbir 2005", "sunday 13 november 2005"),
# uz-Cyrl
param('uz-Cyrl', "пайшанба 24 ноябр 1957 01:18 то", "thursday 24 november 1957 01:18 am"),
param('uz-Cyrl', "4 авг 1887 чоршанба", "4 august 1887 wednesday"),
# uz-Latn
param('uz-Latn', "3 iyul 1997 payshanba 08:17 tk", "3 july 1997 thursday 08:17 pm"),
param('uz-Latn', "shan 15 sentabr 2008", "saturday 15 september 2008"),
# uz
param('uz', "1 fevral 1776 dushanba 09:17 to", "1 february 1776 monday 09:17 am"),
param('uz', "juma 18 aprel 2027", "friday 18 april 2027"),
# vun
param('vun', "2 aprilyi 1956 jumatatuu", "2 april 1956 monday"),
param('vun', "jumamosi 12 oktoba 02:16 kyiukonyi", "saturday 12 october 02:16 pm"),
# wae
param('wae', "zištag 16 abrille 2002", "tuesday 16 april 2002"),
param('wae', "27 öigšte 1669 fritag", "27 august 1669 friday"),
# xog
param('xog', "21 marisi 2001 owokubili", "21 march 2001 tuesday"),
param('xog', "kuta 30 okitobba 1955 02:17 eigulo", "friday 30 october 1955 02:17 pm"),
# yav
param('yav', "12 imɛŋ i puɔs 1998 metúkpíápɛ", "12 september 1998 wednesday"),
param('yav', "5 o10 2001 séselé 12:07 kiɛmɛ́ɛm", "5 october 2001 saturday 12:07 am"),
# yo
param('yo', "5 èrèlè 2005 ọjọ́rú 10:07 àárọ̀", "5 february 2005 wednesday 10:07 am"),
param('yo', "ọjọ́ àbámẹ́ta 2 oṣù ẹ̀bibi 1896", "saturday 2 may 1896"),
# zu
param('zu', "3 mashi 2007 ulwesibili 10:08", "3 march 2007 tuesday 10:08"),
param('zu', "son 23 umasingana 1996", "sunday 23 january 1996"),
])
def test_translation(self, shortname, datetime_string, expected_translation):
    """Check that *datetime_string* in language *shortname* translates to the expected English form."""
    self.given_settings()
    self.given_bundled_language(shortname)
    self.given_string(datetime_string)
    self.when_datetime_string_translated()
    self.then_string_translated_to(expected_translation)
@parameterized.expand([
# English
param('en', "yesterday", "1 day ago"),
param('en', "today", "0 day ago"),
param('en', "day before yesterday", "2 day ago"),
param('en', "last month", "1 month ago"),
param('en', "less than a minute ago", "45 second ago"),
# German
param('de', "vorgestern", "2 day ago"),
param('de', "heute", "0 day ago"),
param('de', "vor 3 Stunden", "3 hour ago"),
param('de', "vor 2 Monaten", "2 month ago"),
param('de', "vor 2 Monaten, 2 Wochen", "2 month ago 2 week"),
# French
param('fr', "avant-hier", "2 day ago"),
param('fr', "hier", "1 day ago"),
param('fr', "aujourd'hui", "0 day ago"),
param('fr', "après dix ans", "in 10 year"),
# Spanish
param('es', "anteayer", "2 day ago"),
param('es', "ayer", "1 day ago"),
param('es', "ayer a las", "1 day ago "),
param('es', "hoy", "0 day ago"),
param('es', "hace un horas", "1 hour ago"),
param('es', "2 semanas", "2 week"),
param('es', "2 año", "2 year"),
# Italian
param('it', "altro ieri", "2 day ago"),
param('it', "ieri", "1 day ago"),
param('it', "oggi", "0 day ago"),
param('it', "2 settimana fa", "2 week ago"),
param('it', "2 anno fa", "2 year ago"),
# Portuguese
param('pt', "anteontem", "2 day ago"),
param('pt', "ontem", "1 day ago"),
param('pt', "hoje", "0 day ago"),
param('pt', "56 minutos", "56 minute"),
param('pt', "12 dias", "12 day"),
param('pt', "há 14 min.", "14 minute ago."),
param('pt', "1 segundo atrás", "1 second ago"),
# Russian
param('ru', "9 месяцев", "9 month"),
param('ru', "8 недель", "8 week"),
param('ru', "7 лет", "7 year"),
param('ru', "позавчера", "2 day ago"),
param('ru', "сейчас", "0 second ago"),
param('ru', "спустя 2 дня", "in 2 day"),
param('ru', "вчера", "1 day ago"),
param('ru', "сегодня", "0 day ago"),
param('ru', "завтра", "in 1 day"),
param('ru', "послезавтра", "in 2 day"),
param('ru', "во вторник", " tuesday"),
param('ru', "в воскресенье", " sunday"),
param('ru', "несколько секунд", "44 second"),
param('ru', "через пару секунд", "in 2 second"),
param('ru', "одну минуту назад", "1 minute ago"),
param('ru', "через полчаса", "in 30 minute"),
param('ru', "сорок минут назад", "40 minute ago"),
param('ru', "в течение пары часов", "in 2 hour"),
param('ru', "через четыре часа", "in 4 hour"),
param('ru', "в течение суток", "in 1 day"),
param('ru', "двое суток назад", "2 day ago"),
param('ru', "неделю назад", "1 week ago"),
param('ru', "две недели назад", "2 week ago"),
param('ru', "три месяца назад", "3 month ago"),
param('ru', "спустя полгода", "in 6 month"),
param('ru', "через год", "in 1 year"),
param('ru', "через полтора года", "in 18 month"),
# Turkish
param('tr', "dün", "1 day ago"),
param('tr', "22 dakika", "22 minute"),
param('tr', "12 hafta", "12 week"),
param('tr', "13 yıl", "13 year"),
# Czech
param('cs', "40 sekunda", "40 second"),
param('cs', "4 týden", "4 week"),
param('cs', "14 roků", "14 year"),
# Chinese
param('zh', "昨天", "1 day ago"),
param('zh', "前天", "2 day ago"),
param('zh', "50 秒", "50 second"),
param('zh', "7 周", "7 week"),
param('zh', "12 年", "12 year"),
param('zh', "半小时前", "30 minute ago"),
# Danish
param('da', "i går", "1 day ago"),
param('da', "i dag", "0 day ago"),
param('da', "sidste | |
u'finished',
'builder': mock.ANY,
})
def test_api_failure_on_error_in_exit(self):
    """An error raised while tearing down the container is reported on the build.

    ``docker_client.kill`` fails inside ``__exit__``; the build must still be
    saved via API v2 as finished and unsuccessful with the error recorded.
    """
    # NOTE: removed an unused `response = Mock(...)` local left over from an
    # earlier version of this test (it was never referenced).
    self.mocks.configure_mock('docker_client', {
        'kill.side_effect': BuildEnvironmentError('Failed')
    })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        pass
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # No commands were executed
    self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': False,
        'project': self.project.pk,
        'setup_error': '',
        'exit_code': 1,
        'length': 0,
        'error': 'Failed',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_api_failure_returns_previous_error_on_error_in_exit(self):
    """
    Treat previously raised errors with more priority.

    Don't report a connection problem to Docker on cleanup if we have a more
    usable error to show the user.
    """
    # NOTE: removed an unused `response = Mock(...)` local left over from an
    # earlier version of this test (it was never referenced).
    self.mocks.configure_mock('docker_client', {
        'kill.side_effect': BuildEnvironmentError('Outer failed')
    })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        raise BuildEnvironmentError('Inner failed')
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # No commands were executed
    self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
    # The error raised inside the context body wins over the cleanup error
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': False,
        'project': self.project.pk,
        'setup_error': '',
        'exit_code': 1,
        'length': 0,
        'error': 'Inner failed',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_command_execution(self):
    """Command execution through Docker.

    A mocked exec reports ExitCode 1, so the command and the final build
    payload must both carry exit_code 1 and the build must be failed.
    """
    self.mocks.configure_mock(
        'docker_client', {
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 1},
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        build_env.run('echo test', cwd='/tmp')
    # the command is shell-wrapped with a cd into cwd and the argument escaped
    self.mocks.docker_client.exec_create.assert_called_with(
        container='build-123-project-6-pip',
        cmd="/bin/sh -c 'cd /tmp && echo\\ test'", stderr=True, stdout=True)
    self.assertEqual(build_env.commands[0].exit_code, 1)
    self.assertEqual(build_env.commands[0].output, u'This is the return')
    self.assertEqual(build_env.commands[0].error, None)
    self.assertTrue(build_env.failed)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The command was saved
    command = build_env.commands[0]
    self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
        'build': DUMMY_BUILD_ID,
        'command': command.get_command(),
        'description': command.description,
        'output': command.output,
        'exit_code': 1,
        'start_time': command.start_time,
        'end_time': command.end_time,
    })
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': False,
        'project': self.project.pk,
        'setup_error': '',
        'exit_code': 1,
        'length': 0,
        'error': '',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_command_not_recorded(self):
    """Command execution through Docker without recording the command.

    With ``record=False`` the command is neither stored on the environment
    nor posted to the API, and — since nothing was recorded — the build is
    reported successful even though the exec exited with code 1.
    """
    self.mocks.configure_mock(
        'docker_client', {
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 1},
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        build_env.run('echo test', cwd='/tmp', record=False)
    self.mocks.docker_client.exec_create.assert_called_with(
        container='build-123-project-6-pip',
        cmd="/bin/sh -c 'cd /tmp && echo\\ test'", stderr=True, stdout=True)
    self.assertEqual(len(build_env.commands), 0)
    self.assertFalse(build_env.failed)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The command was not saved
    self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': True,
        'project': self.project.pk,
        'setup_error': '',
        'length': 0,
        'error': '',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_record_command_as_success(self):
    """``record_as_success=True`` overrides a failing exit code with 0.

    Docker reports ExitCode 1, but the recorded command and the build payload
    must both show exit_code 0 and the build must be considered successful.
    """
    self.mocks.configure_mock(
        'docker_client', {
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 1},
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        build_env.run('echo test', cwd='/tmp', record_as_success=True)
    self.mocks.docker_client.exec_create.assert_called_with(
        container='build-123-project-6-pip',
        cmd="/bin/sh -c 'cd /tmp && echo\\ test'", stderr=True, stdout=True)
    self.assertEqual(build_env.commands[0].exit_code, 0)
    self.assertEqual(build_env.commands[0].output, u'This is the return')
    self.assertEqual(build_env.commands[0].error, None)
    self.assertFalse(build_env.failed)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The command was saved
    command = build_env.commands[0]
    self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
        'build': DUMMY_BUILD_ID,
        'command': command.get_command(),
        'description': command.description,
        'output': command.output,
        'exit_code': 0,
        'start_time': command.start_time,
        'end_time': command.end_time,
    })
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': True,
        'project': self.project.pk,
        'setup_error': '',
        'exit_code': 0,
        'length': 0,
        'error': '',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_command_execution_cleanup_exception(self):
    """Command execution through Docker, catch exception during cleanup.

    ``docker_client.kill`` raises a DockerAPIError on teardown; the build
    must still be reported as successful since the command itself succeeded.
    """
    response = Mock(status_code=500, reason='Because')
    self.mocks.configure_mock(
        'docker_client', {
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 0},
            'kill.side_effect': DockerAPIError(
                'Failure killing container',
                response,
                'Failure killing container',
            )
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        build_env.run('echo', 'test', cwd='/tmp')
    # cleanup attempted to kill the container despite the API error
    self.mocks.docker_client.kill.assert_called_with(
        'build-123-project-6-pip')
    self.assertTrue(build_env.successful)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The command was saved
    command = build_env.commands[0]
    self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
        'build': DUMMY_BUILD_ID,
        'command': command.get_command(),
        'description': command.description,
        'output': command.output,
        'exit_code': 0,
        'start_time': command.start_time,
        'end_time': command.end_time,
    })
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'error': '',
        'success': True,
        'project': self.project.pk,
        'setup_error': u'',
        'exit_code': 0,
        'length': 0,
        'setup': u'',
        'output': u'',
        'state': u'finished',
        'builder': mock.ANY,
    })
def test_container_already_exists(self):
    """Docker container already exists.

    When ``inspect_container`` reports a running container for this version,
    entering the environment raises BuildEnvironmentError and no command is
    ever executed.
    """
    self.mocks.configure_mock(
        'docker_client', {
            'inspect_container.return_value': {'State': {'Running': True}},
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 0},
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )

    def _inner():
        with build_env:
            build_env.run('echo', 'test', cwd='/tmp')

    self.assertRaises(BuildEnvironmentError, _inner)
    self.assertEqual(
        str(build_env.failure),
        'A build environment is currently running for this version')
    self.assertEqual(self.mocks.docker_client.exec_create.call_count, 0)
    self.assertTrue(build_env.failed)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The build failed before executing any command
    self.assertFalse(self.mocks.mocks['api_v2.command'].post.called)
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': False,
        'project': self.project.pk,
        'setup_error': '',
        'exit_code': 1,
        'length': 0,
        'error': 'A build environment is currently running for this version',
        'setup': '',
        'output': '',
        'state': 'finished',
        'builder': mock.ANY,
    })
def test_container_timeout(self):
    """Docker container timeout and command failure.

    ``inspect_container`` first raises a 404 (no container on entry), then
    reports a dead container with ExitCode 42 on exit — the environment maps
    that to a time-out failure even though the command itself succeeded.
    """
    response = Mock(status_code=404, reason='Container not found')
    self.mocks.configure_mock(
        'docker_client', {
            'inspect_container.side_effect': [
                DockerAPIError(
                    'No container found',
                    response,
                    'No container found',
                ),
                {'State': {'Running': False, 'ExitCode': 42}},
            ],
            'exec_create.return_value': {'Id': b'container-foobar'},
            'exec_start.return_value': b'This is the return',
            'exec_inspect.return_value': {'ExitCode': 0},
        })
    build_env = DockerBuildEnvironment(
        version=self.version,
        project=self.project,
        build={'id': DUMMY_BUILD_ID},
    )
    with build_env:
        build_env.run('echo', 'test', cwd='/tmp')
    self.assertEqual(str(build_env.failure), 'Build exited due to time out')
    self.assertEqual(self.mocks.docker_client.exec_create.call_count, 1)
    self.assertTrue(build_env.failed)
    # api() is not called anymore, we use api_v2 instead
    self.assertFalse(self.mocks.api()(DUMMY_BUILD_ID).put.called)
    # The command was saved
    command = build_env.commands[0]
    self.mocks.mocks['api_v2.command'].post.assert_called_once_with({
        'build': DUMMY_BUILD_ID,
        'command': command.get_command(),
        'description': command.description,
        'output': command.output,
        'exit_code': 0,
        'start_time': command.start_time,
        'end_time': command.end_time,
    })
    self.mocks.mocks['api_v2.build']().put.assert_called_with({
        'id': DUMMY_BUILD_ID,
        'version': self.version.pk,
        'success': False,
        'project': self.project.pk,
        'setup_error': u'',
        'exit_code': 1,
        'length': 0,
        'error': 'Build exited due to time out',
        'setup': u'',
        'output': u'',
        'state': u'finished',
        'builder': mock.ANY,
    })
class TestBuildCommand(TestCase):

    """Test build command creation.

    These tests run real subprocesses (true/false, bash, cat), so they also
    exercise the local (non-Docker) execution path of BuildCommand.
    """

    def test_command_env(self):
        """Test build command env vars."""
        env = {'FOOBAR': 'foobar', 'BIN_PATH': 'foobar'}
        cmd = BuildCommand('echo', environment=env)
        for key in list(env.keys()):
            self.assertEqual(cmd.environment[key], env[key])

    def test_result(self):
        """Test result of output using unix true/false commands."""
        cmd = BuildCommand('true')
        cmd.run()
        self.assertTrue(cmd.successful)
        cmd = BuildCommand('false')
        cmd.run()
        self.assertTrue(cmd.failed)

    def test_missing_command(self):
        """Test missing command."""
        # build a path that is guaranteed not to exist
        path = os.path.join('non-existant', str(uuid.uuid4()))
        self.assertFalse(os.path.exists(path))
        cmd = BuildCommand(path)
        cmd.run()
        # wording differs between platforms/shells, accept either message
        missing_re = re.compile(r'(?:No such file or directory|not found)')
        self.assertRegex(cmd.error, missing_re)

    def test_input(self):
        """Test input to command."""
        cmd = BuildCommand('/bin/cat', input_data='FOOBAR')
        cmd.run()
        self.assertEqual(cmd.output, 'FOOBAR')

    def test_output(self):
        """Test output command."""
        cmd = BuildCommand(['/bin/bash', '-c', 'echo -n FOOBAR'])
        # Mock BuildCommand.sanitize_output just to count the amount of calls,
        # but delegate to the original method so it behaves like the real one
        original_sanitized_output = cmd.sanitize_output
        with patch('readthedocs.doc_builder.environments.BuildCommand.sanitize_output') as sanitize_output:  # noqa
            sanitize_output.side_effect = original_sanitized_output
            cmd.run()
            self.assertEqual(cmd.output, 'FOOBAR')
            # Check that we sanitize the output
            self.assertEqual(sanitize_output.call_count, 2)

    def test_error_output(self):
        """Test error output from command."""
        # Test default combined output/error streams
        cmd = BuildCommand(['/bin/bash', '-c', 'echo -n FOOBAR 1>&2'])
        cmd.run()
        self.assertEqual(cmd.output, 'FOOBAR')
        self.assertIsNone(cmd.error)
        # Test non-combined streams
        cmd = BuildCommand(['/bin/bash', '-c', 'echo -n FOOBAR 1>&2'],
                           combine_output=False)
        cmd.run()
        self.assertEqual(cmd.output, '')
        self.assertEqual(cmd.error, 'FOOBAR')

    def test_sanitize_output(self):
        """NUL bytes are stripped from raw process output when decoding."""
        cmd = BuildCommand(['/bin/bash', '-c', 'echo'])
        checks = (
            (b'Hola', 'Hola'),
            (b'H\x00i', 'Hi'),
            (b'H\x00i \x00\x00\x00You!\x00', 'Hi You!'),
        )
        for output, sanitized in checks:
            self.assertEqual(cmd.sanitize_output(output), sanitized)

    @patch('subprocess.Popen')
    def test_unicode_output(self, mock_subprocess):
        """Unicode output from command."""
        mock_process = Mock(**{
            'communicate.return_value': (SAMPLE_UTF8_BYTES, b''),
        })
        mock_subprocess.return_value = mock_process
        cmd = BuildCommand(['echo', 'test'], cwd='/tmp/foobar')
        cmd.run()
        self.assertEqual(
            cmd.output,
            u'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9')
class TestDockerBuildCommand(TestCase):

    """Tests for Docker-wrapped build commands."""

    def setUp(self):
        self.mocks = EnvironmentMockGroup()
        self.mocks.start()

    def tearDown(self):
        self.mocks.stop()

    def test_wrapped_command(self):
        """Commands are wrapped in a shell that chdirs (and sets PATH)."""
        plain = DockerBuildCommand(
            ['pip', 'install', 'requests'],
            cwd='/tmp/foobar',
        )
        self.assertEqual(
            plain.get_wrapped_command(),
            "/bin/sh -c 'cd /tmp/foobar && pip install requests'",
        )
        with_bin_path = DockerBuildCommand(
            ['python', '/tmp/foo/pip', 'install', 'Django>1.7'],
            cwd='/tmp/foobar',
            bin_path='/tmp/foo',
        )
        self.assertEqual(
            with_bin_path.get_wrapped_command(),
            ('/bin/sh -c '
             "'cd /tmp/foobar && PATH=/tmp/foo:$PATH "
             r"python /tmp/foo/pip install Django\>1.7'"),
        )

    def test_unicode_output(self):
        """UTF-8 bytes coming back from the container exec are decoded."""
        self.mocks.configure_mock(
            'docker_client', {
                'exec_create.return_value': {'Id': b'container-foobar'},
                'exec_start.return_value': SAMPLE_UTF8_BYTES,
                'exec_inspect.return_value': {'ExitCode': 0},
            })
        cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
        cmd.build_env = Mock()
        cmd.build_env.get_client.return_value = self.mocks.docker_client
        type(cmd.build_env).container_id = PropertyMock(return_value='foo')
        cmd.run()
        self.assertEqual(
            cmd.output,
            u'H\xe9r\xc9 \xee\xdf s\xf6m\xea \xfcn\xef\xe7\xf3\u2202\xe9')
        # Exactly one exec lifecycle: start, create, inspect.
        for method in ('exec_start', 'exec_create', 'exec_inspect'):
            self.assertEqual(
                getattr(self.mocks.docker_client, method).call_count, 1)

    def test_command_oom_kill(self):
        """Container exit code 137 is reported as an OOM kill."""
        self.mocks.configure_mock(
            'docker_client', {
                'exec_create.return_value': {'Id': b'container-foobar'},
                'exec_start.return_value': b'Killed\n',
                'exec_inspect.return_value': {'ExitCode': 137},
            })
        cmd = DockerBuildCommand(['echo', 'test'], cwd='/tmp/foobar')
        cmd.build_env = Mock()
        cmd.build_env.get_client.return_value = self.mocks.docker_client
        type(cmd.build_env).container_id = PropertyMock(return_value='foo')
        cmd.run()
        self.assertIn(
            'Command killed due to excessive memory consumption\n',
            str(cmd.output)
        )
class TestPythonEnvironment(TestCase):
    def setUp(self):
        # One project/version pair per documentation type so tests can
        # exercise both the Sphinx and the MkDocs code paths.
        self.project_sphinx = get(Project, documentation_type='sphinx')
        self.version_sphinx = get(Version, project=self.project_sphinx)
        self.project_mkdocs = get(Project, documentation_type='mkdocs')
        self.version_mkdocs = get(Version, project=self.project_mkdocs)
        self.build_env_mock = Mock()
        # Packages expected to be installed into every build environment.
        self.base_requirements = [
            'Pygments',
            'setuptools',
            'docutils',
            'mock',
            'pillow',
            'alabaster',
        ]
| |
# -------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the Apache License, Version 2.0. See
# License.txt in the project root for license
# information.
# ---------------
"""
The sync selection is used to prepare one or more directories for the sync
operation. You can use the resource methods to populate the selection (i.e. add
directories) and then submit the entire selection for immediate or scheduled
execution.
The sync selection is a temporary resource. It does not survive system crashes
and server shutdowns, nor does it need to be explicitly destroyed by the
caller. It goes out of scope by invoking the submit method, which effectively
passes the control to the Job manager. The owner of the sync selection resource
is thus the P5 system, so the caller does not need (nor should) perform any
other task with the same resource.
Usage:
To use the SyncSelection resource, you must first use the create method to
create a new instance. Having created an instance, use the adddirectory method
to fill-in the selection with directories to synchronize. Finally, submit the
selection for immediate or scheduled execution. After submission, the resource
goes out of scope and should not be used any more.
"""
from awp5.base.connection import P5Resource, exec_nsdchat
from awp5.base.helpers import resourcelist, onereturnvalue
from awp5.api.job import Job
from awp5.api.syncplan import SyncPlan
# P5 CLI resource class name; used as the first token of every nsdchat command.
module_name = "SyncSelection"
@onereturnvalue
def create(syncplan, as_object=False, p5_connection=None):
    """Create a new temporary sync selection resource.

    CLI syntax: ``SyncSelection create <plan>``. The resource is deleted
    automatically after the associated sync job has been submitted.

    :param syncplan: name of a registered synchronize plan (see the
        ``SyncPlan names`` CLI command)
    :param as_object: return SyncSelection object(s) instead of the raw name
    :param p5_connection: connection the command is executed on
    :returns: on success, the name of the new resource; use it to address
        the resource in all the other methods
    """
    result = exec_nsdchat([module_name, "create", syncplan], p5_connection)
    if as_object:
        return resourcelist(result, SyncSelection, p5_connection)
    return result
@onereturnvalue
def adddirectory(syncselection_name, path, p5_connection=None):
    """Add one directory to the sync selection.

    CLI syntax: ``SyncSelection <name> adddirectory <path>``. Expects the
    absolute path of a directory on the source client, located under the
    source path given in the sync plan the selection was created from.

    :returns: the directory path on success
    """
    command = [module_name, syncselection_name, "adddirectory", path]
    return exec_nsdchat(command, p5_connection)
@onereturnvalue
def addrecursive(syncselection_name, path, p5_connection=None):
    """Add one directory to the sync selection, recursing into subfolders.

    CLI syntax: ``SyncSelection <name> addrecursive <path>``. Expects the
    absolute path of a directory on the source client, located under the
    source path given in the sync plan the selection was created from.

    :returns: the directory path repeated on success
    """
    command = [module_name, syncselection_name, "addrecursive", path]
    return exec_nsdchat(command, p5_connection)
@onereturnvalue
def destroy(syncselection_name, p5_connection=None):
    """Explicitly destroy the sync selection.

    CLI syntax: ``SyncSelection <name> destroy``. The <name> must not be
    used in any SyncSelection command afterwards.

    :returns: the string "0" if destroyed, "1" if not destroyed
    """
    command = [module_name, syncselection_name, "destroy"]
    return exec_nsdchat(command, p5_connection)
@onereturnvalue
def onjobactivation(syncselection_name, command=None, p5_connection=None):
    """Register a command to run just before the submitted job starts.

    CLI syntax: ``SyncSelection <name> onjobactivation [<command>]``. The
    command can be any valid OS command plus a variable number of
    arguments. Its first word (the program itself) may be prefixed with
    the name of the P5 client to execute on; without a prefix it runs on
    the client the SyncSelection was created for, and the prefix
    ``localhost:`` targets the P5 server.

    Examples:
        ``"mickey:/var/myscript arg"`` runs /var/myscript on the client
        "mickey" (regardless of the selection's client) with one
        argument, ``arg``.
        ``"/var/scripts/myscript"`` runs on the selection's own client.
        ``"localhost:/var/scripts/myscript"`` runs on the P5 server.

    :returns: the command string on success
    """
    return exec_nsdchat(
        [module_name, syncselection_name, "onjobactivation", command],
        p5_connection,
    )
@onereturnvalue
def onjobcompletion(syncselection_name, command=None, p5_connection=None):
    """Register a command to run right after the submitted job completes.

    CLI syntax: ``SyncSelection <name> onjobcompletion [<command>]``.
    See :func:`onjobactivation` for the command format.

    :returns: the command string on success
    """
    return exec_nsdchat(
        [module_name, syncselection_name, "onjobcompletion", command],
        p5_connection,
    )
@onereturnvalue
def submit(syncselection_name, now=True, as_object=False, p5_connection=None):
    """Submit the sync selection for execution.

    CLI syntax: ``SyncSelection <name> submit [<now>]``. Plan execution
    times can be overridden by passing a true value for ``now``; the CLI
    documents the string spellings "1", "t", "true", "True", "y", "yes"
    and "Yes" in addition to the boolean.

    This command implicitly destroys the SyncSelection object and
    transfers ownership of the underlying resource to the job scheduler;
    <name> must not be used afterwards.

    :param syncselection_name: name of the sync selection resource
    :param now: run immediately instead of at the plan's scheduled time
    :param as_object: return Job object(s) instead of the raw job ID
    :param p5_connection: connection the command is executed on
    :returns: on success, the sync job ID; query its status through the
        Job resource
    """
    method_name = "submit"
    # Accept the boolean True as well as the string spellings promised by
    # the docstring; previously only `now is True` enabled the flag, so
    # e.g. now="yes" silently submitted without immediate execution.
    now_option = ""
    if now is True or now in ("1", "t", "true", "True", "y", "yes", "Yes"):
        now_option = "1"
    result = exec_nsdchat([module_name, syncselection_name, method_name,
                           now_option], p5_connection)
    if not as_object:
        return result
    return resourcelist(result, Job, p5_connection)
class SyncSelection(P5Resource):
    # Object-oriented wrapper around the SyncSelection CLI resource.
    def __init__(self, syncselection_name, p5_connection=None):
        # P5Resource stores the resource name and the connection used by
        # the nsdchat_call helpers in the methods below.
        super().__init__(syncselection_name, p5_connection)
    @onereturnvalue
    # NOTE(review): defined without self/cls and without @staticmethod —
    # call it via the class (SyncSelection.create(...)), not an instance;
    # an instance call would bind the instance to `syncplan`. Confirm
    # whether @staticmethod was intended.
    def create(syncplan=None, as_object=True, p5_connection=None):
        """
        Syntax: SyncSelection create <plan>
        Description: Creates new temporary sync selection resource. The
        resource will be automatically deleted after the associated sync job
        has been submitted.
        The <plan> must be one of the registered synchronize plans. You can get
        the list of synchronize plans with the SyncPlan names CLI command
        Return Values:
        -On Success: the name of the new resource. Use this name to
                     address the resource in all the other methods
        """
        method_name = "create"
        result = exec_nsdchat([module_name, method_name, syncplan],
                              p5_connection)
        if not as_object:
            return result
        else:
            return resourcelist(result, SyncSelection, p5_connection)
@onereturnvalue
def adddirectory(self, path):
"""
Syntax: SyncSelection <name> adddirectory <path>
Description: Adds one new directory <path> to the sync selection
<name>. It expects the absolute path to the directory to be synced. The
directory must be located on the source client and under the source
path as given in the sync plan used to create the sync selection
object.
Return Values:
-On Success: the directory path
"""
method_name = "adddirectory"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path])
@onereturnvalue
def addrecursive(self, path):
"""
Syntax: SyncSelection <name> addrecursive <path>
Description: Adds a single new directory <path> to the sync selection
<name> and recurses into the subfolders of that directory. It expects
the absolute path to the directory to be synced. The directory must be
located on the source client and under the source path as given in the
sync plan used to create the sync selection object.
Return Values:
-On Success: the directory path repeated
"""
method_name = "addrecursive"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name, path])
@onereturnvalue
def destroy(self):
"""
Syntax: SyncSelection <name> destroy
Description: Explicitly destroys the sync selection. The <name> should
not be used in any SyncSelection commands afterwards.
Return Values:
-On Success: the string "0" (destroyed)
the string "1" (not destroyed)
"""
method_name = "destroy"
return self.p5_connection.nsdchat_call([module_name, self.name,
method_name])
@onereturnvalue
def onjobactivation(self, command=None):
"""
Syntax: SyncSelection <name> onjobactivation [<command>]
Description: Registers the <command> to be executed just before the job
is started by the submit method. The command itself can be any valid OS
command plus variable number of arguments.
The very first argument of the command (the program itself) can be
prepended with the name of the P5 client where the command is to be
executed on. If omitted, the command will be executed on the client
which the SyncSelecttion object is created for.
Examples:
SyncSelection SyncSelection.0 onjobactivation
"mickey:/var/myscript arg"
will execute /var//myscript on the client "mickey" regardless what
client the SyncSelection is created for. The program will be passed one
argument: arg.
SyncSelection SyncSelection.0 onjobactivation "/var/scripts/myscript"
will execute /var/scripts/myscript on the client the SyncSelection is
created for.
SyncSelection SyncSelection.0 | |
TT as a two-part Julian date.
:type date1, date2: float
:returns: combined precessoin/nutation matrix, as a numpy.matrix of shape \
3x3.
.. seealso:: |MANUAL| page 173
"""
rmatpn = _np.asmatrix(_np.zeros(shape=(3,3), dtype=float, order='C'))
_sofa.iauPnm80(date1, date2, rmatpn)
return rmatpn
# iauPom00
_sofa.iauPom00.argtypes = [_ct.c_double,  # xp
                           _ct.c_double,  # yp
                           _ct.c_double,  # sp
                           _ndpointer(shape=(3,3), dtype=float, flags='C')]  # rpom
def pom00(xp, yp, sp):
    """ Form the matrix of polar motion for a given date, IAU 2000.

    :param xp, yp: coordinates of the pole in radians.
    :type xp, yp: float
    :param sp: the TIO locator in radians.
    :type sp: float
    :returns: the polar motion matrix, as a numpy.matrix of shape 3x3.

    .. seealso:: |MANUAL| page 174
    """
    # Output buffer must be C-contiguous to satisfy the declared argtypes.
    matrix = _np.asmatrix(_np.zeros(shape=(3,3), dtype=float, order='C'))
    _sofa.iauPom00(float(xp), float(yp), float(sp), matrix)
    return matrix
# iauPpp
_sofa.iauPpp.argtypes = [_ndpointer(shape=(1,3), dtype=float, flags='C'),  # a
                         _ndpointer(shape=(1,3), dtype=float, flags='C'),  # b
                         _ndpointer(shape=(1,3), dtype=float, flags='C')]  # apb
def ppp(a, b):
    """ P-vector addition.

    :param a: first p-vector.
    :type a: array-like of shape (1,3)
    :param b: second p-vector.
    :type b: array-like of shape (1,3)
    :returns: a + b as a numpy.matrix of shape 1x3.

    .. seealso:: |MANUAL| page 175
    """
    total = _np.asmatrix(_np.zeros(shape=(1,3), dtype=float, order='C'))
    _sofa.iauPpp(_req_shape_c(a, float, (1,3)),
                 _req_shape_c(b, float, (1,3)),
                 total)
    return total
# iauPpsp
_sofa.iauPpsp.argtypes = [_ndpointer(shape=(1,3), dtype=float, flags='C'),  # a
                          _ct.c_double,  # s
                          _ndpointer(shape=(1,3), dtype=float, flags='C'),  # b
                          _ndpointer(shape=(1,3), dtype=float, flags='C')]  # apsb
def ppsp(a, s, b):
    """ P-vector plus scaled p-vector.

    :param a: first p-vector.
    :type a: array-like of shape (1,3)
    :param s: scalar (multiplier for *b*).
    :type s: float
    :param b: second p-vector.
    :type b: array-like of shape (1,3)
    :returns: a + s*b as a numpy.matrix of shape 1x3.

    .. seealso:: |MANUAL| page 176
    """
    result = _np.asmatrix(_np.zeros(shape=(1,3), dtype=float, order='C'))
    _sofa.iauPpsp(_req_shape_c(a, float, (1,3)), s,
                  _req_shape_c(b, float, (1,3)),
                  result)
    return result
# iauPr00
_sofa.iauPr00.argtypes = [_ct.c_double,  # date1
                          _ct.c_double,  # date2
                          _ct.POINTER(_ct.c_double),  # dpsipr
                          _ct.POINTER(_ct.c_double)]  # depspr
def pr00(date1, date2):
    """ Precession-rate part of the IAU 2000 precession-nutation models.

    :param date1, date2: TT as a two-part Julian date.
    :type date1, date2: float
    :returns: a 2-tuple:

        * precession correction in longitude (float)
        * precession correction in obliquity (float).

    .. seealso:: |MANUAL| page 177
    """
    # The C routine returns both corrections through double pointers.
    longitude_corr = _ct.c_double()
    obliquity_corr = _ct.c_double()
    _sofa.iauPr00(date1, date2, _ct.byref(longitude_corr),
                  _ct.byref(obliquity_corr))
    return longitude_corr.value, obliquity_corr.value
# iauPrec76
_sofa.iauPrec76.argtypes = [_ct.c_double,  # ep01
                            _ct.c_double,  # ep02
                            _ct.c_double,  # ep11
                            _ct.c_double,  # ep12
                            _ct.POINTER(_ct.c_double),  # zeta
                            _ct.POINTER(_ct.c_double),  # z
                            _ct.POINTER(_ct.c_double)]  # theta
def prec76(ep01, ep02, ep11, ep12):
    """ Form the three Euler angles which implement general precession
    between two epochs, using the IAU 1976 model (as for the FK5 catalog).

    :param ep01, ep02: two-part TDB starting epoch.
    :type ep01, ep02: float
    :param ep11, ep12: two-part TDB ending epoch.
    :type ep11, ep12: float
    :returns: a 3-tuple:

        * 1st rotation: radians cw around z (float)
        * 3rd rotation: radians cw around z (float)
        * 2nd rotation: radians ccw around y (float).

    .. seealso:: |MANUAL| page 179
    """
    zeta, z, theta = _ct.c_double(), _ct.c_double(), _ct.c_double()
    _sofa.iauPrec76(ep01, ep02, ep11, ep12, _ct.byref(zeta), _ct.byref(z),
                    _ct.byref(theta))
    return zeta.value, z.value, theta.value
# iauPv2p
_sofa.iauPv2p.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                          _ndpointer(shape=(1,3), dtype=float, flags='C')]  # p
def pv2p(pv):
    """ Discard the velocity component of a pv-vector.

    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)
    :returns: p-vector as a numpy.matrix of shape 1x3.

    .. seealso:: |MANUAL| page 181
    """
    position = _np.asmatrix(_np.zeros(shape=(1,3), dtype=float, order='C'))
    _sofa.iauPv2p(_req_shape_c(pv, float, (2,3)), position)
    return position
# iauPv2s
_sofa.iauPv2s.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                          _ct.POINTER(_ct.c_double),  # theta
                          _ct.POINTER(_ct.c_double),  # phi
                          _ct.POINTER(_ct.c_double),  # r
                          _ct.POINTER(_ct.c_double),  # td
                          _ct.POINTER(_ct.c_double),  # pd
                          _ct.POINTER(_ct.c_double)]  # rd
def pv2s(pv):
    """ Convert position/velocity from cartesian to spherical coordinates.

    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)
    :returns: a 6-tuple:

        * longitude angle :math:`\\theta` in radians (float)
        * latitude angle :math:`\\phi` in radians (float)
        * radial distance *r* (float)
        * rate of change of :math:`\\theta` (float)
        * rate of change of :math:`\\phi` (float)
        * rate of change of *r* (float)

    .. seealso:: |MANUAL| page 182
    """
    # Six scalar outputs: theta, phi, r, td, pd, rd — in that order.
    outputs = [_ct.c_double() for _ in range(6)]
    _sofa.iauPv2s(_req_shape_c(pv, float, (2,3)),
                  *[_ct.byref(out) for out in outputs])
    return tuple(out.value for out in outputs)
# iauPvdpv
_sofa.iauPvdpv.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'), #a
                           _ndpointer(shape=(2,3), dtype=float, flags='C'), #b
                           _ndpointer(shape=(1,2), dtype=float, flags='C')] #adb
def pvdpv(a, b):
    """ Inner product of two pv-vectors.

    :param a: first pv-vector.
    :type a: array-like of shape (2,3)
    :param b: second pv-vector.
    :type b: array-like of shape (2,3)
    :returns: a . b as a numpy.matrix of shape 1x2.

    .. seealso:: |MANUAL| page 183
    """
    # Allocate the output with the same explicit (1,2) shape the argtypes
    # declare; the previous shape=(2) only matched because asmatrix
    # promotes a 1-D array to a single-row matrix.
    adb = _np.asmatrix(_np.zeros(shape=(1,2), dtype=float, order='C'))
    _sofa.iauPvdpv(_req_shape_c(a, float, (2,3)),
                   _req_shape_c(b, float, (2,3)), adb)
    return adb
# iauPvm
_sofa.iauPvm.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                         _ct.POINTER(_ct.c_double),  # r
                         _ct.POINTER(_ct.c_double)]  # s
def pvm(pv):
    """ Modulus of a pv-vector.

    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)
    :returns: a 2-tuple:

        * modulus of the position component (float)
        * modulus of the velocity component (float).

    .. seealso:: |MANUAL| page 184
    """
    pos_mod, vel_mod = _ct.c_double(), _ct.c_double()
    _sofa.iauPvm(_req_shape_c(pv, float, (2,3)), _ct.byref(pos_mod),
                 _ct.byref(vel_mod))
    return pos_mod.value, vel_mod.value
# iauPvmpv
_sofa.iauPvmpv.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'), #a
                           _ndpointer(shape=(2,3), dtype=float, flags='C'), #b
                           _ndpointer(shape=(2,3), dtype=float, flags='C')] #amb
def pvmpv(a, b):
    """ Subtract one pv-vector from another.

    :param a: first pv-vector.
    :type a: array-like of shape (2,3)
    :param b: second pv-vector.
    :type b: array-like of shape (2,3)
    :returns: a - b as a numpy.matrix of shape 2x3.

    .. seealso:: |MANUAL| page 185
    """
    difference = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
    _sofa.iauPvmpv(_req_shape_c(a, float, (2,3)),
                   _req_shape_c(b, float, (2,3)),
                   difference)
    return difference
# iauPvppv
_sofa.iauPvppv.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'), #a
                           _ndpointer(shape=(2,3), dtype=float, flags='C'), #b
                           _ndpointer(shape=(2,3), dtype=float, flags='C')] #apb
def pvppv(a, b):
    """ Add one pv-vector to another.

    :param a: first pv-vector.
    :type a: array-like of shape (2,3)
    :param b: second pv-vector.
    :type b: array-like of shape (2,3)
    :returns: a + b as a numpy.matrix of shape 2x3.

    .. seealso:: |MANUAL| page 186
    """
    total = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
    _sofa.iauPvppv(_req_shape_c(a, float, (2,3)),
                   _req_shape_c(b, float, (2,3)),
                   total)
    return total
# iauPvstar
_sofa.iauPvstar.argtypes = [
    _ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
    _ct.POINTER(_ct.c_double),  # ra
    _ct.POINTER(_ct.c_double),  # dec
    _ct.POINTER(_ct.c_double),  # pmr
    _ct.POINTER(_ct.c_double),  # pmd
    _ct.POINTER(_ct.c_double),  # px
    _ct.POINTER(_ct.c_double)]  # rv
_sofa.iauPvstar.restype = _ct.c_int
_pvstar_msg = {
    -1: 'superluminal speed',
    -2: 'null position vector'
    }
def pvstar(pv):
    """ Convert a star position-velocity vector to catalog coordinates.

    :param pv: pv-vector (AU, AU/day).
    :type pv: array-like of shape (2,3)
    :returns: a 6-tuple:

        * right ascension in radians (float)
        * declination in radians (float)
        * RA proper motion (radians/year) (float)
        * Dec proper motion (radians/year) (float)
        * parallax in arcseconds (float)
        * radial velocity (km/s, positive = receding)

    :raises: :exc:`ValueError` if the speed is greater than or equal to
        the speed of light.

    .. seealso:: |MANUAL| page 187
    """
    ra, dec, pmr, pmd, px, rv = [_ct.c_double() for _ in range(6)]
    status = _sofa.iauPvstar(_req_shape_c(pv, float, (2,3)), _ct.byref(ra),
                             _ct.byref(dec), _ct.byref(pmr), _ct.byref(pmd),
                             _ct.byref(px), _ct.byref(rv))
    # Nonzero status codes map to descriptive error messages.
    if status != 0:
        raise ValueError(_pvstar_msg[status])
    return ra.value, dec.value, pmr.value, pmd.value, px.value, rv.value
# iauPvu
_sofa.iauPvu.argtypes = [_ct.c_double,  # dt
                         _ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                         _ndpointer(shape=(2,3), dtype=float, flags='C')]  # upv
def pvu(dt, pv):
    """ Update a pv-vector.

    :param dt: time interval.
    :type dt: float
    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)
    :returns: a new pv-vector as a numpy.matrix of shape 2x3, with p \
        updated and v unchanged.

    .. seealso:: |MANUAL| page 189
    """
    updated = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
    _sofa.iauPvu(dt, _req_shape_c(pv, float, (2,3)), updated)
    return updated
# iauPvup
_sofa.iauPvup.argtypes = [_ct.c_double,  # dt
                          _ndpointer(shape=(2,3), dtype=float, flags='C'),  # pv
                          _ndpointer(shape=(1,3), dtype=float, flags='C')]  # p
def pvup(dt, pv):
    """ Update a pv-vector, discarding the velocity component.

    :param dt: time interval.
    :type dt: float
    :param pv: pv-vector.
    :type pv: array-like of shape (2,3)
    :returns: a new p-vector, as a numpy.matrix of shape 1x3.

    .. seealso:: |MANUAL| page 190
    """
    position = _np.asmatrix(_np.zeros(shape=(1,3), dtype=float, order='C'))
    _sofa.iauPvup(dt, _req_shape_c(pv, float, (2,3)), position)
    return position
# iauPvxpv
_sofa.iauPvxpv.argtypes = [_ndpointer(shape=(2,3), dtype=float, flags='C'), #a
                           _ndpointer(shape=(2,3), dtype=float, flags='C'), #b
                           _ndpointer(shape=(2,3), dtype=float, flags='C')] #axb
def pvxpv(a, b):
    """ Outer product of two pv-vectors.

    :param a: first pv-vector.
    :type a: array-like of shape (2,3)
    :param b: second pv-vector.
    :type b: array-like of shape (2,3)
    :returns: a x b as a numpy.matrix of shape 2x3.

    .. seealso:: |MANUAL| page 191
    """
    cross = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
    _sofa.iauPvxpv(_req_shape_c(a, float, (2,3)),
                   _req_shape_c(b, float, (2,3)),
                   cross)
    return cross
# iauPxp
_sofa.iauPxp.argtypes = [_ndpointer(shape=(1,3), dtype=float, flags='C'), #a
_ndpointer(shape=(1,3), dtype=float, flags='C'), #b
_ndpointer(shape=(1,3), dtype=float, flags='C')] #axb
def pxp(a, b):
""" P-vector outer product.
:param | |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set ts=4 sw=4 et sts=4 ai:
from __future__ import print_function
import binascii
import crcmod
import ctypes
import enum
import math
import re
import sys
from utils import *
# Remove after https://bugs.python.org/issue19023 is fixed.
# ctypes has no LittleEndianUnion; on a little-endian host the plain Union
# already has the desired layout, so alias it (and assert the assumption).
assert sys.byteorder == 'little'
ctypes.LittleEndianUnion = ctypes.Union
class DynamicLengthStructure(ctypes.LittleEndianStructure):
    r"""
    >>> class Test(DynamicLengthStructure):
    ...     _fields_ = [
    ...         ("b1", ctypes.c_uint8),
    ...         ("crc8", ctypes.c_uint8),
    ...         ("_len", ctypes.c_uint8),
    ...         ("_data", ctypes.c_ubyte * 0),
    ...     ]
    >>> assert ctypes.sizeof(Test) == 3
    >>> t1 = Test()
    >>> ctypes.sizeof(t1)
    3
    >>> t1._extra_size
    0
    >>> t1.b1
    0
    >>> t1.crc8
    0
    >>> t1.len
    0
    >>> t1._len
    0
    >>> t1.data[:]
    []
    >>> type(t1.data)
    <class '__main__.c_ubyte_Array_0'>
    >>> ctypes.sizeof(t1.data)
    0
    >>> t1.as_bytearray()
    bytearray(b'\x00\x00\x00')
    >>> # Changing normal value
    >>> t1.b1 = 0xa
    >>> t1.b1
    10
    >>> t1.as_bytearray()
    bytearray(b'\n\x00\x00')
    >>> # Increasing data length
    >>> t1.len = 1
    >>> ctypes.sizeof(t1)
    4
    >>> t1.data[:]
    [0]
    >>> type(t1.data)
    <class '__main__.c_ubyte_Array_1'>
    >>> ctypes.sizeof(t1.data)
    1
    >>> t1.as_bytearray()
    bytearray(b'\n\x00\x01\x00')
    >>> t1.data[0] = 0xf
    >>> t1.data[:]
    [15]
    >>> t1.as_bytearray()
    bytearray(b'\n\x00\x01\x0f')
    >>> # Increasing data length further
    >>> t1.len = 2
    >>> ctypes.sizeof(t1)
    5
    >>> t1.data[:]
    [15, 0]
    >>> t1.as_bytearray()
    bytearray(b'\n\x00\x02\x0f\x00')
    >>> t1.data[1] = 0xe
    >>> t1.data[:]
    [15, 14]
    >>> t1.as_bytearray()
    bytearray(b'\n\x00\x02\x0f\x0e')
    >>> # Test CRC calculation
    >>> t2 = Test()
    >>> # CRC doesn't change on CRC value
    >>> hex(t2.crc_calculate())
    '0x0'
    >>> t2.crc8 = 0xa
    >>> t2.crc8
    10
    >>> hex(t2.crc_calculate())
    '0x0'
    >>> # CRC changes on changing b1
    >>> t2.b1 = 1
    >>> t2.b1
    1
    >>> hex(t2.crc_calculate())
    '0x15'
    >>> # CRC changes on changing values in the data section
    >>> t2.len = 1
    >>> hex(t2.crc_calculate())
    '0x7e'
    >>> t2.data[0] = 1
    >>> hex(t2.crc_calculate())
    '0x79'
    >>> t2.crc_check()
    False
    >>> t2.crc_update()
    >>> hex(t2.crc8)
    '0x79'
    >>> t2.crc_check()
    True
    >>> class TestExtra(DynamicLengthStructure):
    ...     _fields_ = [
    ...         ("b1", ctypes.c_uint8),
    ...         ("crc8", ctypes.c_uint8),
    ...         ("_len", ctypes.c_uint8),
    ...         ("b2", ctypes.c_uint8),
    ...         ("_data", ctypes.c_ubyte * 0),
    ...     ]
    >>> assert ctypes.sizeof(TestExtra) == 4
    >>> t3 = TestExtra()
    >>> ctypes.sizeof(t3)
    4
    >>> t3._extra_size
    1
    >>> t3.len
    0
    >>> t3._len
    1
    >>> t3.b2 = 0xf
    >>> t3.as_bytearray()
    bytearray(b'\x00\x00\x01\x0f')
    >>> t3.data[:]
    []
    >>> type(t3.data)
    <class '__main__.c_ubyte_Array_0'>
    >>> ctypes.sizeof(t3.data)
    0
    >>> t3.len = 1
    >>> ctypes.sizeof(t3)
    5
    >>> t3.data[:]
    [0]
    >>> type(t3.data)
    <class '__main__.c_ubyte_Array_1'>
    >>> ctypes.sizeof(t3.data)
    1
    >>> t3.as_bytearray()
    bytearray(b'\x00\x00\x02\x0f\x00')
    >>> t3.data[0] = 0xe
    >>> t3.data[:]
    [14]
    >>> t3.as_bytearray()
    bytearray(b'\x00\x00\x02\x0f\x0e')
    """
    # Packed layout: no padding between fields, so offsets match the wire
    # format byte for byte.
    _pack_ = 1
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # A fresh instance starts with _len covering only the fixed
        # "extra" fields declared between _len and _data.
        if self._len == 0:
            self._len = self._extra_size
    @property
    def _extra_start(self):
        # Offset of the first byte after the _len field.
        return self.__class__._len.offset+self.__class__._len.size
    @property
    def _extra_end(self):
        # Offset where the variable-length payload (_data) begins.
        return self.__class__._data.offset
    @property
    def _extra_size(self):
        # Number of fixed bytes a subclass declares between _len and _data.
        return self._extra_end - self._extra_start
    @property
    def len(self):
        # Payload length only; _len counts the fixed extra bytes as well.
        return self._len - self._extra_size
    @len.setter
    def len(self, value):
        rsize = self._extra_end + value
        # Grow the underlying ctypes buffer when needed. Only growth is
        # attempted: ctypes.resize rejects sizes below sizeof(type).
        if ctypes.sizeof(self) < rsize:
            try:
                ctypes.resize(self, rsize)
            except ValueError:
                raise ValueError("Need %s more space" % (rsize - ctypes.sizeof(self)))
        self._len = self._extra_size + value
    @property
    def data(self):
        # Live c_ubyte array view over the payload, sized by the current len.
        addr = ctypes.addressof(self)
        return (ctypes.c_ubyte * self.len).from_address(addr+self._extra_end)
    def as_bytearray(self):
        # Full raw byte image of the structure, including any resized payload.
        return bytearray((ctypes.c_ubyte * ctypes.sizeof(self)).from_address(ctypes.addressof(self)))
    def crc_calculate(self):
        # CRC-8 over every byte except the crc8 field itself, so storing
        # the CRC does not change the value it protects.
        raw_bytes = self.as_bytearray()
        bytes_before = raw_bytes[0:self.__class__.crc8.offset]
        bytes_after = raw_bytes[self.__class__.crc8.offset+1:]
        crc = crcmod.predefined.Crc('crc-8')
        crc.update(bytes_before)
        crc.update(bytes_after)
        return crc.crcValue
    def crc_check(self):
        # True when the stored crc8 matches the freshly computed value.
        return self.crc8 == self.crc_calculate()
    def crc_update(self):
        # Recompute and store crc8, then sanity-check the round trip.
        self.crc8 = self.crc_calculate()
        assert self.crc_check()
class Atom(DynamicLengthStructure):
    # Default/unassigned atom type marker.
    TYPE = 0xff
    _fields_ = [
        ("type", ctypes.c_uint8),
        ("_len", ctypes.c_uint8),
        ("_data", ctypes.c_ubyte * 0),
    ]
    def __repr__(self):
        r"""
        >>> repr(Atom(0xff, 0))
        "Atom(b'\\xff\\x00')"
        >>> a = Atom(0xfe)
        >>> a.len = 2
        >>> a.data[:] = [0x1, 0x2]
        >>> repr(a)
        "Atom(b'\\xfe\\x02\\x01\\x02')"
        """
        # Render the raw byte image as a bytes literal.
        return "%s(%r)" % (self.__class__.__name__, bytes(self.as_bytearray()))
assert ctypes.sizeof(Atom) == 2
class AtomFormatString(Atom):
    FORMAT = 0x00
    TYPES = {}
    @classmethod
    def create(cls, s):
        u"""
        >>> a1 = AtomFormatString.create("numato")
        >>> a1.len
        6
        >>> a1.str
        'numato'
        >>> a1.as_bytearray()
        bytearray(b'\\xff\\x06numato')
        >>> a1.data[:]
        [110, 117, 109, 97, 116, 111]
        >>> a2 = AtomFormatString.create(u"\u2603")
        >>> a2.len
        3
        >>> a2.str
        '☃'
        >>> a2.data[:]
        [226, 152, 131]
        """
        atom = cls(type=cls.TYPE)
        assert atom.type == cls.TYPE
        assert atom._len == 0
        atom.str = s
        return atom
    @property
    def str(self):
        # Payload bytes decoded as UTF-8.
        return bytes(self.data[:]).decode('utf-8')
    @str.setter
    def str(self, s):
        encoded = s.encode('utf-8')
        self.len = len(encoded)
        self.data[:] = encoded[:]
    def __repr__(self):
        r"""
        >>> a1 = AtomFormatString.create("numato")
        >>> repr(a1)
        "AtomFormatString('numato')"
        >>> a2 = AtomFormatString.create(u"\u2603")
        >>> repr(a2)
        "AtomFormatString('☃')"
        """
        return "%s(%r)" % (self.__class__.__name__, self.str)
class AtomFormatURL(AtomFormatString):
    FORMAT = 0x10
    TYPES = {}
    @classmethod
    def create(cls, url):
        u"""
        >>> a1 = AtomFormatURL.create("https://numato")
        >>> a1.len
        6
        >>> a1.url
        'https://numato'
        >>> a1.as_bytearray()
        bytearray(b'\\xff\\x06numato')
        >>> a1.data[:]
        [110, 117, 109, 97, 116, 111]
        >>> a2 = AtomFormatURL.create(u"http://\u2603")
        >>> a2.len
        3
        >>> a2.url
        'https://☃'
        >>> a2.data[:]
        [226, 152, 131]
        """
        atom = cls(type=cls.TYPE)
        assert atom.type == cls.TYPE
        assert atom._len == 0
        atom.url = url
        return atom
    @property
    def url(self):
        # The scheme is not stored; https is assumed on the way back out.
        return "https://" + self.str
    @url.setter
    def url(self, url):
        # Strip any leading scheme before storing the remainder.
        _scheme, sep, rest = url.partition("://")
        self.str = rest if sep else url
    def __repr__(self):
        r"""
        >>> a1 = AtomFormatURL.create("numato")
        >>> repr(a1)
        "AtomFormatURL('https://numato')"
        >>> a2 = AtomFormatURL.create(u"\u2603")
        >>> repr(a2)
        "AtomFormatURL('https://☃')"
        """
        return "%s(%r)" % (self.__class__.__name__, self.url)
class AtomFormatRelativeURL(AtomFormatString):
    FORMAT = 0x20
    TYPES = {}
    # Extends the base Atom layout with one fixed byte ("index") between
    # _len and the variable payload; _len therefore counts index + payload.
    _fields_ = [
        ("index", ctypes.c_uint8),
        ("_data", ctypes.c_char * 0),
    ]
    # Alias: the payload accessed as a string is the relative URL.
    rurl = AtomFormatString.str
    @classmethod
    def create(cls, index, url):
        u"""
        >>> a1 = AtomFormatRelativeURL.create(2, "numato")
        >>> a1.len
        6
        >>> a1.str
        'numato'
        >>> a1.as_bytearray()
        bytearray(b'\\xff\\x07\\x02numato')
        >>> a1.data[:]
        [110, 117, 109, 97, 116, 111]
        >>> a2 = AtomFormatRelativeURL.create(4, u"\u2603")
        >>> a2.len
        3
        >>> a2.str
        '☃'
        >>> a2.data[:]
        [226, 152, 131]
        >>> a2.as_bytearray()
        bytearray(b'\\xff\\x04\\x04\\xe2\\x98\\x83')
        """
        # Relative URLs must not start with a slash.
        assert not url.startswith('/')
        o = cls(type=cls.TYPE)
        assert o.type == cls.TYPE
        assert o._len == 1
        o.index = index
        o.str = url
        # NOTE(review): _relative_atom is only initialized here; instances
        # built directly (not via create) will hit AttributeError in
        # __repr__ — confirm whether that is intended.
        o._relative_atom = None
        return o
    def __repr__(self):
        r"""
        >>> a1 = AtomFormatRelativeURL.create(1, "numato")
        >>> repr(a1)
        "AtomFormatRelativeURL(1, 'numato')"
        >>> a2 = AtomFormatRelativeURL.create(2, u"\u2603")
        >>> repr(a2)
        "AtomFormatRelativeURL(2, '☃')"
        >>> ar = AtomFormatURL.create("a")
        >>> a3 = AtomFormatRelativeURL.create(2, "b")
        >>> a3._relative_atom = ar
        >>> repr(a3)
        "AtomFormatRelativeURL('https://a/b')"
        """
        # When linked to a base URL atom, render the joined absolute URL;
        # otherwise show the unresolved (index, path) pair.
        if self._relative_atom:
            assert isinstance(self._relative_atom, AtomFormatURL)
            return u"%s('%s/%s')" % (self.__class__.__name__, self._relative_atom.url, self.str)
        else:
            return u"%s(%i, '%s')" % (self.__class__.__name__, self.index, self.str)
class AtomFormatExpandInt(Atom):
    """Atom holding an unsigned integer in a minimal-length little-endian
    encoding: the value 0 occupies zero payload bytes, and each extra byte
    extends the range by 8 bits (see doctests for the wire format)."""

    @classmethod
    def create(cls, v):
        r"""Build an atom encoding the non-negative integer *v*.

        >>> a1 = AtomFormatExpandInt.create(0)
        >>> a1.len
        0
        >>> a1.v
        0
        >>> a1.as_bytearray()
        bytearray(b'\xff\x00')
        >>> a1 = AtomFormatExpandInt.create(2)
        >>> a1.len
        1
        >>> a1.v
        2
        >>> a1.as_bytearray()
        bytearray(b'\xff\x01\x02')
        >>> a2 = AtomFormatExpandInt.create(2**63)
        >>> a2.len
        8
        >>> a2.v
        9223372036854775808
        >>> a2.as_bytearray()
        bytearray(b'\xff\x08\x00\x00\x00\x00\x00\x00\x00\x80')
        """
        o = cls(type=cls.TYPE)
        assert o.type == cls.TYPE
        assert o._len == 0
        o.v = v
        return o

    @property
    def v(self):
        # Decode little-endian: byte i contributes bits [8i, 8i+8).
        v = 0
        for i, b in enumerate(self.data):
            v |= b << i * 8
        return v

    @v.setter
    def v(self, v):
        # Encode little-endian, emitting only as many bytes as needed.
        # NOTE(review): negative values silently encode as zero bytes
        # (the loop never runs) and decode back as 0 — confirm callers
        # never pass negatives.
        b = []
        while v > 0:
            b.append(v & 0xff)
            v = v >> 8
        self.len = len(b)
        self.data[:] = bytearray(b)

    def __repr__(self):
        r"""
        >>> a1 = AtomFormatExpandInt.create(2)
        >>> repr(a1)
        'AtomFormatExpandInt(2)'
        >>> a2 = AtomFormatExpandInt.create(2**63)
        >>> repr(a2)
        'AtomFormatExpandInt(9223372036854775808)'
        """
        return "%s(%i)" % (self.__class__.__name__, self.v)
class AtomFormatTimestamp(AtomFormatExpandInt):
    """Unix timestamp stored as a variable-length integer offset from a
    custom epoch (2015-01-01 UTC), which keeps recent timestamps small."""
    FORMAT = 0x30  # format nibble for timestamp atoms
    TYPES = {}  # per-format type registry; cls.TYPE is assumed set elsewhere — TODO confirm
    EPOCH = 1420070400  # 2015/01/01 @ 12:00am (UTC)

    @classmethod
    def create(cls, ts):
        r"""Build a timestamp atom from a Unix time (float seconds are
        rounded to the nearest whole second).

        >>> t1 = 1421070400.0 # 2015-01-12 13:46:40 UTC
        >>> a1 = AtomFormatTimestamp.create(t1)
        >>> a1._len
        3
        >>> a1.ts
        1421070400
        >>> a1.as_bytearray()
        bytearray(b'\xff\x03@B\x0f')
        >>> t2 = 1451606400.0 # 2016-01-01 00:00:00 UTC
        >>> a2 = AtomFormatTimestamp.create(t2)
        >>> a2._len
        4
        >>> a2.ts
        1451606400
        >>> a2.as_bytearray()
        bytearray(b'\xff\x04\x803\xe1\x01')
        >>> t3 = 1606780801.0 # 2020-01-12 00:00:01 UTC
        >>> a3 = AtomFormatTimestamp.create(t3)
        >>> a3._len
        4
        >>> a3.ts
        1606780801
        >>> a3.as_bytearray()
        bytearray(b'\xff\x04\x81\xf9 \x0b')
        >>> a4 = AtomFormatTimestamp.create(2**63)
        >>> a4._len
        8
        >>> a4.ts
        9223372036854775808
        >>> a4.as_bytearray()
        bytearray(b'\xff\x08\x00r[\xab\xff\xff\xff\x7f')
        """
        o = cls(type=cls.TYPE)
        assert o.type == cls.TYPE
        o.ts = int(round(ts))
        return o

    @property
    def ts(self):
        # Stored value is the offset from EPOCH.
        return self.EPOCH + self.v

    @ts.setter
    def ts(self, ts):
        # NOTE(review): strictly-greater check rejects ts == EPOCH exactly,
        # and asserts are stripped under `python -O` — confirm this is the
        # intended validation strategy.
        assert (ts - self.EPOCH) > 0, (ts - self.EPOCH)
        self.v = ts - self.EPOCH

    def __repr__(self):
        r"""
        >>> a1 = AtomFormatTimestamp.create(1421070400)
        >>> repr(a1)
        'AtomFormatTimestamp(1421070400)'
        >>> a2 = AtomFormatTimestamp.create(2**63)
        >>> repr(a2)
        'AtomFormatTimestamp(9223372036854775808)'
        """
        return u"%s(%i)" % (self.__class__.__name__, self.ts)
def _l(license, version):
return license << 3 | version
class _NamesStruct(ctypes.LittleEndianStructure):
    """Bit-field view of the one-byte license tag.

    NOTE(review): with little-endian bit-field allocation the first field
    (`license`) occupies the LOW 5 bits, while `_l()` packs the license into
    the HIGH bits (``license << 3``) — these two layouts appear inconsistent;
    confirm which one matches the wire format before relying on this view.
    """
    _pack_ = 1  # no padding: the union below overlays a single byte
    _fields_ = [
        ("license", ctypes.c_uint8, 5),  # license family id (5-bit field)
        ("version", ctypes.c_uint8, 3),  # version within the family (3-bit field)
    ]
class _NamesUnion(ctypes.LittleEndianUnion):
    """Overlay of the raw tag byte (`_value`) with its bit-field
    decomposition (`_parts`), so the same byte can be read either way.

    NOTE(review): ``ctypes.LittleEndianUnion`` only exists on newer Python
    versions — confirm the target runtime provides it.
    """
    _pack_ = 1
    _fields_ = [
        ("_value", ctypes.c_uint8),   # raw tag byte
        ("_parts", _NamesStruct)      # license/version bit fields
    ]
class AtomFormatLicense(Atom):
FORMAT = 0x40
TYPES = {}
@enum.unique
class Names(enum.IntEnum):
Invalid = 0
# -
MIT = _l(1, 1)
# -
BSD_simple = _l(2, 1)
BSD_new = _l(2, 2)
BSD_isc = _l(2, 3)
# -
Apache_v2 = _l(3, 1)
# -
GPL_v2 = _l(4, 1)
GPL_v3 = _l(4, 2)
# -
LGPL_v21 = _l(5, 1)
LGPL_v3 = _l(5, 2)
# -
CC0_v1 = | |
# source repository: Joshuaalbert/neural_deprojection (0 stars)
import sys
sys.path.insert(1, '/data/s1825216/git/neural_deprojection/')
from neural_deprojection.models.identify_medium_SCD.generate_data import generate_data, decode_examples_old
from neural_deprojection.graph_net_utils import vanilla_training_loop, TrainOneEpoch, AbstractModule, \
get_distribution_strategy, build_log_dir, build_checkpoint_dir, batch_dataset_set_graph_tuples
import glob, os
import tensorflow as tf
from tensorflow_addons.image import gaussian_filter2d
import json
# import tensorflow_addons as tfa
import numpy as np
from functools import partial
from graph_nets.utils_tf import set_zero_global_features
from graph_nets import blocks
from graph_nets.modules import GraphNetwork
from graph_nets._base import WrappedModelFnModule
import sonnet as snt
from graph_nets.graphs import GraphsTuple
from graph_nets.modules import _unsorted_segment_softmax, _received_edges_normalizer, GraphIndependent, SelfAttention, GraphNetwork
from graph_nets.utils_np import graphs_tuple_to_networkxs
from graph_nets.utils_tf import fully_connect_graph_dynamic
from networkx.drawing import draw
from networkx.linalg.spectrum import normalized_laplacian_spectrum
from networkx import Graph
import pylab as plt
from typing import Callable, Iterable, Optional, Text
from sonnet.src import base
from sonnet.src import initializers
from sonnet.src import linear
from sonnet.src import utils, once
class RelationNetwork(AbstractModule):
    """Implementation of a Relation Network.

    See https://arxiv.org/abs/1706.01427 for more details.

    Edge features are recomputed from receiver/sender node features and then
    aggregated into updated globals.  The input graph's edge and global
    features are not used and may be `None` (receivers and senders must be
    present).  The output graph carries updated, non-`None` globals.
    """

    def __init__(self,
                 edge_model_fn,
                 global_model_fn,
                 reducer=tf.math.unsorted_segment_mean,  # try with mean instead of sum
                 use_globals=False,
                 name="relation_network"):
        """Initializes the RelationNetwork module.

        Args:
          edge_model_fn: Callable passed to EdgeBlock for per-edge
            computations; must return a Sonnet module (or equivalent).
          global_model_fn: Callable passed to GlobalBlock for per-global
            computations; must return a Sonnet module (or equivalent).
          reducer: Reducer used by GlobalBlock to aggregate edges.
          use_globals: Whether the blocks also consume input globals.
          name: The module name.
        """
        super(RelationNetwork, self).__init__(name=name)
        # Edges are derived purely from the endpoint node features.
        self._edge_block = blocks.EdgeBlock(
            edge_model_fn=edge_model_fn,
            use_edges=False,
            use_receiver_nodes=True,
            use_sender_nodes=True,
            use_globals=use_globals)
        # Globals are derived purely from the aggregated edge features.
        self._global_block = blocks.GlobalBlock(
            global_model_fn=global_model_fn,
            use_edges=True,
            use_nodes=False,
            use_globals=use_globals,
            edges_reducer=reducer)

    def _build(self, graph):
        """Connects the RelationNetwork.

        Args:
          graph: A `graphs.GraphsTuple` containing `Tensor`s; edges and
            globals may be `None`.

        Returns:
          A `graphs.GraphsTuple` with updated globals.

        Raises:
          ValueError: If any of `graph.nodes`, `graph.receivers` or
            `graph.senders` is `None`.
        """
        updated_edges = self._edge_block(graph)
        return self._global_block(updated_edges)
class MultiHeadLinear(AbstractModule):
    """Linear module applying `num_heads` independent linear maps in
    parallel, optionally including bias.  Maps [num_nodes, input_size]
    inputs to [num_nodes, num_heads, output_size] outputs."""

    def __init__(self,
                 output_size: int,
                 num_heads: int = 1,
                 with_bias: bool = True,
                 w_init=None,
                 b_init=None,
                 name=None):
        """Constructs a `Linear` module.

        Args:
          output_size: Output dimensionality per head.
          num_heads: Number of parallel linear heads. Default `1`.
          with_bias: Whether to include bias parameters. Default `True`.
          w_init: Optional initializer for the weights. By default the weights are
            initialized truncated random normal values with a standard deviation of
            `1 / sqrt(input_feature_size)`, which is commonly used when the inputs
            are zero centered (see https://arxiv.org/abs/1502.03167v3).
          b_init: Optional initializer for the bias. By default the bias is
            initialized to zero.
          name: Name of the module.
        """
        super(MultiHeadLinear, self).__init__(name=name)
        self.output_size = output_size
        self.with_bias = with_bias
        self.w_init = w_init
        self.num_heads = num_heads
        if with_bias:
            self.b_init = b_init if b_init is not None else snt.initializers.Zeros()
        elif b_init is not None:
            raise ValueError("When not using a bias the b_init must be None.")

    @once.once
    def _initialize(self, inputs: tf.Tensor):
        """Constructs parameters used by this module (lazily, on first call)."""
        utils.assert_minimum_rank(inputs, 2)
        input_size = inputs.shape[-1]
        if input_size is None:  # Can happen inside an @tf.function.
            raise ValueError("Input size must be specified at module build time.")
        self.input_size = input_size
        if self.w_init is None:
            # See https://arxiv.org/abs/1502.03167v3.
            stddev = 1 / tf.math.sqrt(self.input_size * 1.0)
            self.w_init = snt.initializers.TruncatedNormal(stddev=stddev)
        # One [input_size, output_size] weight matrix per head.
        self.w = tf.Variable(
            self.w_init([self.num_heads, self.input_size, self.output_size], inputs.dtype),
            name="w")
        if self.with_bias:
            self.b = tf.Variable(
                self.b_init([self.num_heads, self.output_size], inputs.dtype), name="b")

    def _build(self, inputs: tf.Tensor) -> tf.Tensor:
        self._initialize(inputs)
        # [num_nodes, node_size].[num_heads, node_size, output_size] -> [num_nodes, num_heads, output_size]
        outputs = tf.einsum('ns,hso->nho', inputs, self.w, optimize='optimal')
        # outputs = tf.matmul(inputs, self.w)
        if self.with_bias:
            outputs = tf.add(outputs, self.b)
        return outputs
class CoreNetwork(AbstractModule):
    """
    Core network which can be used in the EncodeProcessDecode network. Consists of a (full) graph network block
    and a self attention block.
    """

    def __init__(self,
                 num_heads,
                 multi_head_output_size,
                 input_node_size,
                 name=None):
        # num_heads / multi_head_output_size parameterize the multi-head
        # attention; input_node_size is the node feature width that must be
        # preserved across the block (residual connection requires it).
        super(CoreNetwork, self).__init__(name=name)
        self.num_heads = num_heads
        self.multi_head_output_size = multi_head_output_size
        # Projects concatenated heads back to the input node size.
        self.output_linear = snt.Linear(output_size=input_node_size)
        self.FFN = snt.nets.MLP([32, input_node_size], activate_final=False)  # Feed forward network
        # NOTE(review): this standardizes over ALL elements of the tensor
        # (single mean/std), not per-feature layer norm — confirm intended.
        self.normalization = lambda x: (x - tf.reduce_mean(x)) / tf.math.reduce_std(x)
        self.v_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # values
        self.k_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # keys
        self.q_linear = MultiHeadLinear(output_size=multi_head_output_size, num_heads=num_heads)  # queries
        self.self_attention = SelfAttention()

    def _build(self, latent):
        # Project nodes to per-head values/keys/queries, attend over the
        # graph's connectivity, then merge heads back to input_node_size.
        node_values = self.v_linear(latent.nodes)
        node_keys = self.k_linear(latent.nodes)
        node_queries = self.q_linear(latent.nodes)
        attended_latent = self.self_attention(node_values=node_values,
                                              node_keys=node_keys,
                                              node_queries=node_queries,
                                              attention_graph=latent)
        # Flatten heads: [n, heads, head_size] -> [n, heads * head_size].
        output_nodes = tf.reshape(attended_latent.nodes, (-1, self.num_heads * self.multi_head_output_size))
        # Residual connection around the attention block, then normalize.
        output_nodes = self.normalization(self.output_linear(output_nodes) + latent.nodes)
        # NOTE(review): the FFN output is normalized but has no residual add,
        # unlike a standard transformer block — confirm intended.
        output_nodes = self.normalization(self.FFN(output_nodes))
        output_graph = latent.replace(nodes=output_nodes)
        return output_graph
class EncoderNetwork(AbstractModule):
    """
    Encoder for the EncodeProcessDecode network: a node block that encodes
    node features independently, followed by a RelationNetwork that derives
    edge and global features from the encoded nodes.  (The original docstring
    claimed a self-attention block; there is none here — see CoreNetwork.)
    """

    def __init__(self,
                 edge_model_fn,
                 node_model_fn,
                 global_model_fn,
                 name=None):
        super(EncoderNetwork, self).__init__(name=name)
        # Encodes each node from its own features only (no message passing).
        self.node_block = blocks.NodeBlock(node_model_fn,
                                           use_received_edges=False,
                                           use_sent_edges=False,
                                           use_nodes=True,
                                           use_globals=False)
        # Computes edges from node pairs and aggregates them into globals.
        self.relation_network = RelationNetwork(edge_model_fn=edge_model_fn,
                                                global_model_fn=global_model_fn)

    def _build(self, input_graph):
        latent = self.node_block(input_graph)
        output = self.relation_network(latent)
        return output
class EncodeProcessDecode(AbstractModule):
    """Full encode-process-decode model.

    The model we explore includes three components:
    - An "Encoder" graph net, which independently encodes the edge, node, and
      global attributes (does not compute relations etc.).
    - A "Core" graph net, which performs N rounds of processing (message-passing)
      steps. The input to the Core is the concatenation of the Encoder's output
      and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
      the processing step).
    - A "Decoder" graph net, which independently decodes the edge, node, and
      global attributes (does not compute relations etc.), on each message-passing
      step.

                        Hidden(t)   Hidden(t+1)
                           |            ^
              *---------*  |  *------*  |  *---------*
              |         |  |  |      |  |  |         |
    Input --->| Encoder |  *->| Core |--*->| Decoder |---> Output(t)
              |         |---->|      |     |         |
              *---------*     *------*     *---------*
    """

    def __init__(self,
                 encoder,
                 core,
                 decoder,
                 name="EncodeProcessDecode"):
        super(EncodeProcessDecode, self).__init__(name=name)
        self._encoder = encoder
        self._core = core
        self._decoder = decoder

    def _build(self, input_graph, num_processing_steps):
        latent_graph = self._encoder(input_graph)
        # for _ in range(num_processing_steps):
        #     latent_graph = self._core(latent_graph)
        # state = (counter, latent_graph)
        # Apply the core num_processing_steps times via tf.while_loop so the
        # repetition stays inside the TF graph; loop state is (counter, graph).
        _, latent_graph = tf.while_loop(cond=lambda const, state: const < num_processing_steps,
                                        body=lambda const, state: (const+1, self._core(state)),
                                        loop_vars=(tf.constant(0), latent_graph))
        return self._decoder(latent_graph)
class AutoEncoder(AbstractModule):
    """Convolutional image autoencoder.

    Encoder: four stride-2 Conv2D layers (4/8/16/32 channels, leaky ReLU)
    that downsample the input by 16x.  Decoder: four stride-2 transposed
    convolutions mirroring the encoder, followed by a final 1-channel Conv2D.
    Min/max-normalized before/after images are written to TF summaries at
    `self.step`, which must be set before calling the module.
    """

    def __init__(self, kernel_size=4, name=None):
        super(AutoEncoder, self).__init__(name=name)
        # Fix: initialize the step slot so the `step` property raises its
        # intended ValueError (not an AttributeError) when left unset.
        self._step = None
        self.encoder = snt.Sequential([snt.Conv2D(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,   # [4, 128, 128]
                                       snt.Conv2D(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,   # [8, 64, 64]
                                       snt.Conv2D(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,  # [16, 32, 32]
                                       snt.Conv2D(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu]) # [32, 16, 16]
        self.decoder = snt.Sequential([snt.Conv2DTranspose(32, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,  # [32, 16, 16]
                                       snt.Conv2DTranspose(16, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,  # [16, 32, 32]
                                       snt.Conv2DTranspose(8, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,   # [8, 64, 64]
                                       snt.Conv2DTranspose(4, kernel_size, stride=2, padding='SAME'), tf.nn.leaky_relu,   # [4, 128, 128]
                                       snt.Conv2D(1, kernel_size, padding='SAME')])  # [1, 256, 256]

    @property
    def step(self):
        """Summary-writing step; must be assigned (model.step = epoch) before _build."""
        if self._step is None:
            raise ValueError("Need to set step idx variable. model.step = epoch")
        return self._step

    @step.setter
    def step(self, value):
        self._step = value

    def _build(self, batch):
        (img, ) = batch
        # Min/max-normalize purely for the summary image; the network itself
        # consumes the raw input.
        img_before_autoencoder = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
        tf.summary.image('img_before_autoencoder', img_before_autoencoder, step=self.step)
        encoded_img = self.encoder(img)
        decoded_img = self.decoder(encoded_img)
        img_after_autoencoder = (decoded_img - tf.reduce_min(decoded_img)) / (
                tf.reduce_max(decoded_img) - tf.reduce_min(decoded_img))
        tf.summary.image('img_after_autoencoder', img_after_autoencoder, step=self.step)
        return decoded_img
class Model(AbstractModule):
"""Model inherits from AbstractModule, which contains a __call__ function which executes a _build function
that is to be specified in the child class. So for example:
model = Model(), then model() returns the output of _build()
AbstractModule inherits from snt.Module, which has useful functions that can return the (trainable) variables,
so the Model class has this functionality as well
An instance of the RelationNetwork class also inherits from AbstractModule,
so it also executes its _build() function when called and it can return its (trainable) variables
A RelationNetwork contains an edge block and a global block:
The edge block generally uses the edge, receiver, sender and global attributes of the input graph
to calculate the new edges.
In our case we currently only use the receiver and sender attributes to calculate the edges.
The global block generally uses the aggregated edge, aggregated node and the global attributes of the input graph
to calculate the new globals.
In our case we currently only use the aggregated edge attributes to calculate the new globals.
As input the RelationNetwork | |
Global command to configure PortChannel Graceful shutdown and interface specific gshut configs
:param dut:
:param kwargs:
:return:
Usage:
config_po_graceful_shutdown(dut1)
config_po_graceful_shutdown(dut1,exception_po_list='PortChannel10')
config_po_graceful_shutdown(dut1,config_mode='del')
config_po_graceful_shutdown(dut1,config_mode='del',exception_po_list='PortChannel10')
### To disable/enable po gshut only on individual POs.
### This will be in effect only if global PO GSHUT enable already configured
## 1) To enable PO GSHUT at interface level, config_mode='del',as u need to negate global action
config_po_graceful_shutdown(dut1,config_level='interface',config_mode='del',\
exception_po_list=["PortChannel10","PortChannel11"])
## 2) To disable PO GSHUT at interface level, config_mode='add' [default option with global mode]
config_po_graceful_shutdown(dut1,config_level='interface',config_mode='add',\
exception_po_list=["PortChannel10","PortChannel11"])
'''
cli_type = kwargs.pop('cli_type', st.get_ui_type(dut))
config_mode = kwargs.pop('config', 'add')
config_level = kwargs.pop('config_level', 'global')
if config_level == 'interface' and 'exception_po_list' not in kwargs:
st.error("Mandatory parameter exception_po_list not found for config_level:interface")
return False
cmd = []
if cli_type == 'click':
global_config_str = 'disable' if config_mode=='del' else 'enable'
po_config_str = 'disable' if config_mode == 'add' else 'enable'
if 'exception_po_list' in kwargs:
exception_po_list = kwargs['exception_po_list']
exception_po_list = [exception_po_list] if type(exception_po_list) is str else exception_po_list
for po in exception_po_list:
cmd.append("config portchannel graceful-shutdown {} {}".format(po_config_str,po))
if config_level == 'global':
cmd.append("config portchannel graceful-shutdown {}".format(global_config_str))
elif cli_type == 'klish':
global_config_str = 'no ' if config_mode=='del' else ''
po_config_str = 'no ' if config_mode == 'add' else ''
if 'exception_po_list' in kwargs:
exception_po_list = kwargs['exception_po_list']
exception_po_list = [exception_po_list] if type(exception_po_list) is str else exception_po_list
for po in exception_po_list:
po_int = uutils.get_interface_number_from_name(po)
cmd.append("interface {} {}".format(po_int['type'], po_int['number']))
cmd.append("{}graceful-shutdown".format(po_config_str))
cmd.append("exit")
if config_level == 'global':
cmd.append("{}portchannel graceful-shutdown".format(global_config_str))
elif cli_type in ['rest-put', 'rest-patch']:
### ONLY global gshut handles now.
global_config_str = 'disable' if config_mode == 'del' else 'enable'
po_config_str = 'disable' if config_mode == 'add' else 'enable'
rest_urls = st.get_datastore(dut, "rest_urls")
global_url = rest_urls['po_gshut_config']
if config_mode =='add':
if 'exception_po_list' in kwargs:
exception_po_list = kwargs['exception_po_list']
exception_po_list = [exception_po_list] if type(exception_po_list) is str else exception_po_list
intf_payload = {"openconfig-interfaces-ext:graceful-shutdown-mode": po_config_str}
for po in exception_po_list:
interface_url = rest_urls['po_ghsut_intf_config'].format(po)
if not config_rest(dut, http_method=cli_type, rest_url=interface_url, json_data=intf_payload):
st.banner('FAIL-OCYANG: Failed to unconfigure PO gshut at interface level')
return False
if config_level == 'global':
payload = {"openconfig-aggregate-ext:graceful-shutdown-mode": global_config_str}
if not config_rest(dut, http_method=cli_type, rest_url=global_url, json_data=payload):
st.banner('FAIL-OCYANG: Config PO gshut globally Failed')
return False
elif config_mode =='del':
if 'exception_po_list' in kwargs:
exception_po_list = kwargs['exception_po_list']
exception_po_list = [exception_po_list] if type(exception_po_list) is str else exception_po_list
intf_payload = {"openconfig-interfaces-ext:graceful-shutdown-mode": po_config_str}
for po in exception_po_list:
interface_url = rest_urls['po_ghsut_intf_config'].format(po)
if not config_rest(dut, http_method=cli_type, rest_url=interface_url, json_data=intf_payload):
st.banner('FAIL-OCYANG: Failed to configure PO gshut at interface level')
return False
if config_level == 'global':
if not delete_rest(dut,rest_url=global_url):
st.banner('FAIL-OCYANG:UnConfig PO gshut globally Failed')
return
else:
st.error("UNSUPPORTED cli_type")
return
st.config(dut, cmd, type=cli_type)
def verify_lacp_fallback(dut, **kwargs):
    """
    Verify the fallback configuration / operational state of a port-channel
    against the fields supplied in kwargs.

    Author: <NAME>
    email: <EMAIL>

    :param dut:
    :param kwargs: port_channel_name (mandatory), fallback_config,
        fallback_oper_status
    :return: True when every supplied field matches the device output.

    Usage:
        verify_lacp_fallback(dut1, port_channel_name='PortChannel10',
                             fallback_config='Enabled', fallback_oper_status='Disabled')
    """
    cli_type = st.get_ui_type(dut, **kwargs)
    kwargs.pop('cli_type', None)
    result = True
    if 'port_channel_name' not in kwargs:
        st.error("Mandatory argument Port Channel name Not Found")
        return False
    po_name = kwargs['port_channel_name']
    if cli_type == 'click':
        output = st.show(dut, 'show interfaces portchannel {} fallback'.format(po_name),
                         type=cli_type, config="false", skip_error_check="True")
    elif cli_type == 'klish':
        output = []
        po_intf = uutils.get_interface_number_from_name(po_name)
        raw_rows = st.show(dut, 'show interface PortChannel {}'.format(po_intf["number"]), type=cli_type)
        try:
            row = raw_rows[0]
            entry = {
                'port_channel_name': "PortChannel{}".format(row['channel_number']),
                'fallback_config': row['fallback'],
                'fallback_oper_status': 'Enabled' if row['oper_fallback'] == 'Operational' else 'Disabled',
            }
            output.append(entry)
            st.debug(output)
        except Exception as e:
            st.error("{} exception occurred".format(e))
            st.debug("The raw output is: {}".format(raw_rows))
            return False
    elif cli_type in ['rest-put', 'rest-patch']:
        output = rest_get_fallback_status(dut, po_name)
    else:
        st.error("Unsupported CLI Type: {}".format(cli_type))
        return False
    if len(output) == 0:
        st.error("Output is Empty, here's the output: {}".format(output))
        return False
    # Compare every requested field against the first parsed record.
    for field, expected in kwargs.items():
        actual = output[0][field]
        if str(expected) != str(actual):
            st.error("Match NOT FOUND for {} : Expected -<{}> Actual-<{}> ".format(field, expected, actual))
            result = False
        else:
            st.log("Match FOUND for {} : Expected -<{}> Actual-<{}> ".format(field, expected, actual))
    return result
def verify_portchannel_fallback_status(dut, portchannel, members_list, iter_count=10, iter_delay=1, state='up', static=False, cli_type=""):
    """
    Poll the DUT until the port-channel reaches the expected fallback state.

    :param dut:
    :param portchannel: port-channel name, e.g. "PortChannel10"
    :param members_list: member interfaces expected in the LAG (click mode)
    :param iter_count: number of polling iterations (click mode)
    :param iter_delay: seconds to wait between iterations (click mode)
    :param state: expected state, 'up' or 'down'
    :param static: True for a static (protocol NONE) LAG, False for LACP
    :return: True on match, False otherwise
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if cli_type == "click":
        proto = "NONE" if static else "LACP"
        # Fix: always bind the member list; previously it was only assigned in
        # the 'up' branch, so state='down' hit an UnboundLocalError below.
        portchannel_member_list = []
        if state.lower() == 'up':
            portchannel_state = '{}(A)(Up)'.format(proto)
            for member in members_list:
                portchannel_member_list.append(str(member) + '(S)')
        elif state.lower() == 'down':
            portchannel_state = '{}(A)(Dw)'.format(proto)
        else:
            st.log("Invalid LAG status provided as input to verify")
            return False
        i = 1
        while i <= iter_count:
            st.log("Checking iteration {}".format(i))
            st.wait(iter_delay)
            output_dict = get_portchannel(dut, portchannel_name=portchannel)[0]
            ports_list = (output_dict['ports'].strip()).split(' ')
            if output_dict['teamdev'] == portchannel:
                if output_dict['protocol'] == portchannel_state:
                    # NOTE(review): returns True as soon as ANY expected member
                    # is selected, not all of them — confirm intended.
                    for portchannel_member in portchannel_member_list:
                        if portchannel_member in ports_list:
                            return True
            else:
                st.log("The Portchannel-{} is not found".format(portchannel))
                return False
            i += 1
    elif cli_type == "klish":
        channel_number = portchannel.replace("PortChannel", "")
        portchannel_details = get_interface_portchannel(dut, channel_number=channel_number, cli_type=cli_type)
        if not portchannel_details:
            st.log("PortChannel Details not found -- {}".format(portchannel_details))
            return False
        # NOTE(review): get_interface_portchannel returns a dict, so the [0]
        # subscript below looks wrong (KeyError 0) — confirm against callers.
        if state == "up":
            if portchannel_details[0]["fallback"] != "Enabled" or portchannel_details[0]["state"] != "up":
                st.log("Fallback state is not matching -- Expecting Enabled but it is {}".format(portchannel_details[0]["fallback"]))
                return False
            elif portchannel_details[0]["fallback"] == "Enabled" and portchannel_details[0]["state"] != "up":
                # NOTE(review): unreachable — the branch above already returns
                # for every state != "up" case.
                st.log("Portchannel state is not up eventhough, fallback mode is enabled.")
                return False
            else:
                return True
    return False
def config_properties(dut, params, cli_type=""):
    """
    API to configure portchannel properties like mtu, no mtu, no fallback and no min links.

    (Fix: this docstring previously sat AFTER the first statement, making it a
    discarded string expression rather than the function's docstring.)

    :param dut:
    :param params: [{"portchannel_name":"PortChannel001","mtu":"9000", "no_mtu":True/False,
        "no_min_links" : True, min_links:1, "fallback":True/False, "v4_address":"2.2.2.2","v4_subnet":24,
        "v6_address":"2001::1", "v6_subnet":64, "no_v6":True/False, "no_v4":True/False,"shutdown":True/False}]
    :param cli_type: click, klish
    :return: True on success, False on missing params / unsupported CLI type
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    commands = list()
    if params:
        for param in params:
            if not param.get("portchannel_name"):
                st.log("PortChannel Name not provided")
                return False
            if cli_type == "klish":
                commands.append("interface PortChannel {}".format(param.get("portchannel_name").replace("PortChannel", "")))
                if not param.get("no_mtu"):
                    if param.get("mtu"):
                        commands.append("mtu {}".format(param.get("mtu")))
                else:
                    commands.append("no mtu")
                if not param.get("no_min_links"):
                    if param.get("min_links"):
                        commands.append("minimum-links {}".format(param.get("min_links")))
                else:
                    commands.append("no minimum-links")
                if param.get("fallback"):
                    commands.append("fallback enable")
                else:
                    commands.append("no fallback")
                if param.get("shutdown"):
                    commands.append("shutdown")
                else:
                    commands.append("no shutdown")
                if param.get("v4_address") and param.get("v4_subnet"):
                    commands.append("ip address {}/{}".format(param.get("v4_address"), param.get("v4_subnet")))
                if param.get("v4_address") and param.get("no_v4"):
                    commands.append("no ip address {}".format(param.get("v4_address")))
                if param.get("v6_address") and param.get("v6_subnet"):
                    commands.append("ipv6 address {}/{}".format(param.get("v6_address"), param.get("v6_subnet")))
                if param.get("v6_address") and param.get("no_v6"):
                    commands.append("no ipv6 address {}".format(param.get("v6_address")))
                commands.append("exit")
            elif cli_type == "click":
                # click supports only create-time options (fallback / min-links).
                if not param.get("fallback"):
                    if not param.get("min_links"):
                        command = "config portchannel add {}".format(param.get("portchannel_name"))
                    else:
                        command = "config portchannel add {} --min-links {}".format(param.get("portchannel_name"), param.get("min_links"))
                else:
                    if not param.get("min_links"):
                        command = "config portchannel add {} --fallback=true".format(param.get("portchannel_name"))
                    else:
                        command = "config portchannel add {} --fallback=true --min-links {}".format(
                            param.get("portchannel_name"), param.get("min_links"))
                commands.append(command)
            else:
                st.log("config_portchannel_properties : Unsupported CLI type")
                return False
    if commands:
        st.config(dut, commands, type=cli_type)
        return True
    else:
        st.log("config_portchannel_properties : PARAMS not provided")
        return False
def config_port_mode(dut, channel_number, interface, mode="active", cli_type=""):
    """
    API to configure port mode for the given channel group.

    (Fix: this docstring previously sat AFTER the first statement, making it a
    discarded string expression rather than the function's docstring.)

    :param dut:
    :param channel_number: port-channel id the interface joins
    :param interface: member interface name
    :param mode: "active" (LACP) or "on" (static)
    :param cli_type:
    :return: True on success, False for unsupported mode / CLI type
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    if mode not in ["active", "on"]:
        st.log("Unsupported mode")
        return False
    if cli_type == "klish":
        commands = list()
        commands.append("interface {}".format(interface))
        commands.append("channel-group {} mode {}".format(channel_number, mode))
        commands.append("exit")
        st.config(dut, commands, type=cli_type)
        return True
    else:
        st.log("PORT MODE CONFIGURATION IS NOT SUPPORTED IN {} CLI".format(cli_type.upper()))
        return False
def get_interface_portchannel(dut, channel_number=None, cli_type=""):
    """
    API to execute "show interface PortChannel {id}" and return the parsed
    result as a flat dict (status fields plus input_*/output_* counters).

    (Fix: this docstring previously sat AFTER the first statement, making it a
    discarded string expression rather than the function's docstring.)

    :param dut:
    :param channel_number: optional channel id appended to the command
    :param cli_type:
    :return: dict of parsed fields; empty dict for non-klish CLI types or
        when the output cannot be parsed
    """
    cli_type = st.get_ui_type(dut, cli_type=cli_type)
    result = dict()
    if cli_type == "klish":
        stats_index = ["pkts", "octets", "multicasts", "broadcasts", "unicasts", "errors", "discards"]
        command = "show interface PortChannel"
        if channel_number:
            command += " {}".format(channel_number)
        output = st.show(dut, command, type=cli_type)
        # Fix: require all three records (status, input stats, output stats)
        # before indexing — previously a short parse raised IndexError.
        if output and len(output) >= 3:
            for key, value in output[0].items():
                if key not in stats_index:
                    result[key] = value
            for key, value in output[1].items():
                if key in stats_index:
                    result["input_{}".format(key)] = value
            for key, value in output[2].items():
                if key in stats_index:
                    result["output_{}".format(key)] = value
    return result
def verify_interface_portchannel(dut,**kwargs):
"""
API to verify interface portchannel
:param dut:
:param kwargs: {u'lacp_mode': 'active', u'partner_mac': '00:00:00:00:00:00', 'input_broadcasts': '3369',
'output_pkts': '74169', u'min_links': '1', 'input_errors': '0', u'ip_mtu': '1500', 'output_multicasts': '70186',
'input_pkts': '6224', 'input_multicasts': '2855', u'priority': '65535', u'state': 'up', u'partner_port': '0',
'output_unicasts': '0', 'output_discards': '0', u'actor_port': '56', 'output_errors': '0',
u'is_selected': 'True', 'output_octets': '10678293', u'members': 'Ethernet56', u'protocol_state': 'down',
'output_broadcasts': '3983', u'actor_mac': '90:b1:1c:f4:a8:7e', 'input_unicasts': '0', u'mtu': '1532',
u'channel_number': '1', u'mode': 'LACP', 'input_discards': '3', u'fallback': 'Enabled',
'input_octets': '1787177', u'pc_mac_address': '90:b1:1c:f4:a8:7e'}
:param cli_type:
:return:
"""
if not kwargs:
st.log("Parameters not provided")
return False
#cli_type = st.get_ui_type(dut, **kwargs)
cli_type = kwargs.get("cli_type", "klish")
if kwargs.get("channel_number"):
output = get_interface_portchannel(dut,channel_number=kwargs.get("channel_number"),cli_type=cli_type)
else:
output = get_interface_portchannel(dut,cli_type=cli_type)
if not output:
st.log("Empty output")
return False
kwargs.pop("cli_type", None)
for key,value in kwargs.items():
if str(output[key]) != str(value):
st.log("Mismatch in {} with value {} but expecting {}".format(key, output[key], value))
| |
exponential.
:param rate: the rate of the Exponential with either the same shape as specified for this vertex or scalar
"""
return Double(context.jvm_view().ExponentialVertex, label, cast_to_double_vertex(rate))
def Gamma(theta: vertex_constructor_param_types, k: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of theta and k to matching shaped gamma.

    :param theta: the theta (scale) of the Gamma with either the same shape as specified for this vertex
    :param k: the k (shape) of the Gamma with either the same shape as specified for this vertex
    :param label: optional label for the new vertex
    """
    return Double(context.jvm_view().GammaVertex, label, cast_to_double_vertex(theta), cast_to_double_vertex(k))
def Gaussian(mu: vertex_constructor_param_types, sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of mu and sigma to a matching shaped Gaussian.

    :param mu: the mu of the Gaussian with either the same shape as specified for this vertex or a scalar
    :param sigma: the sigma of the Gaussian with either the same shape as specified for this vertex or a scalar
    :param label: optional label for the new vertex
    """
    return Double(context.jvm_view().GaussianVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(sigma))
def HalfCauchy(scale: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of scale to a matching shaped Half-Cauchy.

    :param scale: the scale of the Half-Cauchy with either the same shape as specified for this vertex or a scalar
    :param label: optional label for the new vertex
    """
    return Double(context.jvm_view().HalfCauchyVertex, label, cast_to_double_vertex(scale))
def HalfGaussian(sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    One to one constructor for mapping some shape of sigma to a matching shaped Half-Gaussian.

    :param sigma: the sigma of the Half-Gaussian with either the same shape as specified for this vertex or a scalar
    :param label: optional label for the new vertex
    """
    return Double(context.jvm_view().HalfGaussianVertex, label, cast_to_double_vertex(sigma))
def InverseGamma(alpha: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of alpha and beta to
alpha matching shaped Inverse Gamma.
:param alpha: the alpha of the Inverse Gamma with either the same shape as specified for this vertex or alpha scalar
:param beta: the beta of the Inverse Gamma with either the same shape as specified for this vertex or alpha scalar
"""
return Double(context.jvm_view().InverseGammaVertex, label, cast_to_double_vertex(alpha), cast_to_double_vertex(beta))
def KDE(samples: tensor_arg_types, bandwidth: float, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().KDEVertex, label, cast_to_double_tensor(samples), cast_to_double(bandwidth))
def Laplace(mu: vertex_constructor_param_types, beta: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of mu and sigma to
a matching shaped Laplace.
:param mu: the mu of the Laplace with either the same shape as specified for this vertex or a scalar
:param beta: the beta of the Laplace with either the same shape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().LaplaceVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(beta))
def LogNormal(mu: vertex_constructor_param_types, sigma: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().LogNormalVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(sigma))
def Logistic(mu: vertex_constructor_param_types, s: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().LogisticVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(s))
def MultivariateGaussian(mu: vertex_constructor_param_types, covariance: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Matches a mu and full covariance matrix of some shape to a Multivariate Gaussian distribution. Mu should
be shape (batchShape, N) where N is the number of dimensions and batchShape can be any shape that is broadcastable
with the covariance batchShape if it is also batched. The covariance matrix should be shape (batchShape, N, N) where
the batchShape must be broadcastable with the batchShape of mu. Only the lower triangle of the covariance matrix
is used due to it being assumed to be a symmetric matrix. The upper triangle will be ignored.
:param mu: the mu of the Multivariate Gaussian
:param covariance: the covariance matrix of the Multivariate Gaussian
"""
return Double(context.jvm_view().MultivariateGaussianVertex, label, cast_to_double_vertex(mu), cast_to_double_vertex(covariance))
def Pareto(location: vertex_constructor_param_types, scale: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().ParetoVertex, label, cast_to_double_vertex(location), cast_to_double_vertex(scale))
def StudentT(v: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
return Double(context.jvm_view().StudentTVertex, label, cast_to_integer_vertex(v))
def Triangular(x_min: vertex_constructor_param_types, x_max: vertex_constructor_param_types, c: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of xMin, xMax and c to a matching shaped triangular.
:param x_min: the xMin of the Triangular with either the same shape as specified for this vertex or a scalar
:param x_max: the xMax of the Triangular with either the same shape as specified for this vertex or a scalar
:param c: the c of the Triangular with either the same shape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().TriangularVertex, label, cast_to_double_vertex(x_min), cast_to_double_vertex(x_max), cast_to_double_vertex(c))
def Uniform(x_min: vertex_constructor_param_types, x_max: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
One to one constructor for mapping some shape of mu and sigma to
a matching shaped Uniform Vertex
:param x_min: the inclusive lower bound of the Uniform with either the same shape as specified for this vertex or a scalar
:param x_max: the exclusive upper bound of the Uniform with either the same shape as specified for this vertex or a scalar
"""
return Double(context.jvm_view().UniformVertex, label, cast_to_double_vertex(x_min), cast_to_double_vertex(x_max))
def ArcTan2(x: vertex_constructor_param_types, y: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the signed angle, in radians, between the positive x-axis and a ray to the point (x, y) from the origin
    :param x: x coordinate
    :param y: y coordinate
    """
    return Vertex(context.jvm_view().ArcTan2Vertex, label, cast_to_vertex(x), cast_to_vertex(y))

def LogAddExp2(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates log2(2**left + 2**right), the base-2 log-add-exp of the two vertices.
    :param left: the left operand
    :param right: the right operand
    """
    return Vertex(context.jvm_view().LogAddExp2Vertex, label, cast_to_vertex(left), cast_to_vertex(right))

def LogAddExp(left: vertex_constructor_param_types, right: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates log(exp(left) + exp(right)), the log-add-exp of the two vertices.
    :param left: the left operand
    :param right: the right operand
    """
    return Vertex(context.jvm_view().LogAddExpVertex, label, cast_to_vertex(left), cast_to_vertex(right))

def SafeLogTimes(x: vertex_constructor_param_types, y: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    A numerically safe variant of multiplying by a logarithm (see the backing
    SafeLogTimesVertex for the exact edge-case semantics).
    :param x: the x operand
    :param y: the y operand
    """
    return Vertex(context.jvm_view().SafeLogTimesVertex, label, cast_to_vertex(x), cast_to_vertex(y))

def ArcCos(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse cosine of a vertex, Arccos(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcCosVertex, label, cast_to_vertex(input_vertex))

def ArcCosh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse hyperbolic cosine of a vertex, Arccosh(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcCoshVertex, label, cast_to_vertex(input_vertex))

def ArcSin(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse sin of a vertex, Arcsin(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcSinVertex, label, cast_to_vertex(input_vertex))

def ArcSinh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse hyperbolic sine of a vertex, Arcsinh(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcSinhVertex, label, cast_to_vertex(input_vertex))

def ArcTan(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse tan of a vertex, Arctan(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcTanVertex, label, cast_to_vertex(input_vertex))

def ArcTanh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the inverse hyperbolic tangent of a vertex, Arctanh(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ArcTanhVertex, label, cast_to_vertex(input_vertex))

def Ceil(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the Ceiling operator to a vertex.
    This maps a vertex to the smallest integer greater than or equal to its value
    :param input_vertex: the vertex to be ceil'd
    """
    return Vertex(context.jvm_view().CeilVertex, label, cast_to_vertex(input_vertex))

def CholeskyDecomposition(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Computes the Cholesky decomposition of the input matrix vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CholeskyDecompositionVertex, label, cast_to_vertex(input_vertex))

def CholeskyInverse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Computes a matrix inverse via the Cholesky decomposition of the input vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CholeskyInverseVertex, label, cast_to_vertex(input_vertex))

def Cos(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the cosine of a vertex, Cos(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CosVertex, label, cast_to_vertex(input_vertex))

def Cosh(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Takes the hyperbolic cosine of a vertex, Cosh(vertex)
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().CoshVertex, label, cast_to_vertex(input_vertex))

def Digamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the digamma function (logarithmic derivative of the gamma function) to a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().DigammaVertex, label, cast_to_vertex(input_vertex))

def Exp2(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the base-2 exponential, 2**vertex, of a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Exp2Vertex, label, cast_to_vertex(input_vertex))

def ExpM1(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates exp(vertex) - 1 (the expm1 function) of a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ExpM1Vertex, label, cast_to_vertex(input_vertex))

def Exp(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Calculates the exponential of an input vertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().ExpVertex, label, cast_to_vertex(input_vertex))
def Floor(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Applies the Floor operator to a vertex.
    This maps a vertex to the biggest integer less than or equal to its value
    :param input_vertex: the vertex to be floor'd
    """
    return Vertex(context.jvm_view().FloorVertex, label, cast_to_vertex(input_vertex))

def Log10(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the base-10 logarithm of a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log10Vertex, label, cast_to_vertex(input_vertex))

def Log1p(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns log(1 + vertex), the log1p of a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log1pVertex, label, cast_to_vertex(input_vertex))

def Log2(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the base-2 logarithm of a vertex.
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().Log2Vertex, label, cast_to_vertex(input_vertex))

def LogGamma(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the log of the gamma of the inputVertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().LogGammaVertex, label, cast_to_vertex(input_vertex))

def Log(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Returns the natural logarithm, base e, of a vertex
    :param input_vertex: the vertex
    """
    return Vertex(context.jvm_view().LogVertex, label, cast_to_vertex(input_vertex))

def MatrixDeterminant(vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Computes the determinant of a matrix vertex.
    :param vertex: the matrix vertex
    """
    return Vertex(context.jvm_view().MatrixDeterminantVertex, label, cast_to_vertex(vertex))

def MatrixInverse(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
    """
    Computes the inverse of a matrix vertex.
    :param input_vertex: the matrix vertex
    """
    return Vertex(context.jvm_view().MatrixInverseVertex, label, cast_to_vertex(input_vertex))

def Mean(input_vertex: vertex_constructor_param_types, over_dimensions: Collection[int], label: Optional[str]=None) -> Vertex:
    """
    Performs a mean across specified dimensions. Negative dimension indexing is not supported.
    :param input_vertex: the vertex to have its values averaged
    :param over_dimensions: dimensions to average over
    """
    return Vertex(context.jvm_view().MeanVertex, label, cast_to_vertex(input_vertex), cast_to_int_array(over_dimensions))

def ReplaceNaN(input_vertex: vertex_constructor_param_types, replace_with_value: float, label: Optional[str]=None) -> Vertex:
    """
    Replaces any NaN values in the input vertex with the given value.
    :param input_vertex: the vertex
    :param replace_with_value: the value substituted for each NaN
    """
    return Vertex(context.jvm_view().ReplaceNaNVertex, label, cast_to_vertex(input_vertex), cast_to_double(replace_with_value))
def Round(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
| |
range(len(feats), len(feats) + len(oth))]
names.append(self.names)
names = np.hstack(names)
return Features(feats, n_pts=n_pts, categories=cats, names=names)
return NotImplemented
############################################################################
### Properties to get at the basic data

@property
def total_points(self):
    "The total number of points in all bags."
    # _features is the stacked (total_points, dim) feature matrix.
    return self._features.shape[0]

@property
def dim(self):
    "The dimensionality of the features."
    return self._features.shape[1]

@property
def dtype(self):
    "The data type of the feature vectors."
    return self._features.dtype

# Column accessors backed by the structured `.data` array; `category` and
# `name` are singular aliases for the plural properties.
features = property(lambda self: self.data['features'])
categories = category = property(lambda self: self.data['category'])
names = name = property(lambda self: self.data['name'])
# Surface the "extra" metadata columns as attributes as well.
# TODO: create real attributes in the constructor so tab-complete/etc works.
def __getattr__(self, name):
    """Expose each extra metadata field of `.data` as an object attribute."""
    if name not in self._extra_names:
        # Not one of our extras -- fall back to the normal lookup chain.
        return getattr(super(Features, self), name)
    return self.data[name]
############################################################################
### Adding new extras to an existing object

def add_extra(self, name, values, dtype=None, inplace=False):
    '''
    Adds a single "extra" value to this Features object; a thin wrapper
    that forwards one (name, values, dtype) triple to add_extras.
    See add_extras for details.
    '''
    return self.add_extras(
        names=[name],
        values=[values],
        dtypes=None if dtype is None else [dtype],
        inplace=inplace)
def add_extras(self, names, values, dtypes=None, inplace=False):
    '''
    Adds new "extra" values to this Features object.
    Note that for implementation reasons, this requires making a copy of
    the .data array containing all the metadata (though not the actual
    features array itself).
    Arguments:
        - names: a list of names for the new extra values
        - values: a list of the actual values for the new extras. Should
                  be broadcastable to be of shape (len(self),).
        - dtypes (optional): a list of the data types for the new extras.
                  If not passed, uses the dtype of np.asarray(val) for each
                  value. If you don't pass dtypes and values contains
                  objects other than numpy arrays, an extra copy will be
                  made during this process.
        - inplace (optional, default False): if True, adds the extra to
                  this object (though metadata is copied as noted above).
                  If False, returns a new object with the extra added. Note
                  that the new object will be like a shallow copy of this
                  one: the features array and any object-type extras will
                  be shared.
    '''
    # Can't use numpy.lib.recfunctions.append_fields:
    # https://github.com/numpy/numpy/issues/2346
    # Validate that names/values (and dtypes, if given) line up 1:1.
    len_set = set([len(names), len(values)])
    if dtypes is not None:
        len_set.add(len(dtypes))
    if len(len_set) != 1:
        raise ValueError("names, values, and dtypes (if passed) should be "
                         "of same length")
    # Reject duplicate names and collisions with existing metadata fields.
    name_set = set(names)
    if len(name_set) != len(names):
        raise ValueError("can't repeat names...")
    elif not name_set.isdisjoint(self.data.dtype.names):
        raise ValueError("can't use names already in use")
    # Infer dtypes from the values when not given (may copy non-ndarrays).
    if dtypes is None:
        values = [np.asarray(val) for val in values]
        dtypes = [val.dtype for val in values]
    # Rebuild the structured array with the old fields plus the new ones.
    old_descr = self.data.dtype.descr
    new_descr = strict_zip(names, dtypes)
    new = np.empty(len(self), dtype=old_descr + new_descr)
    for name, dtype in old_descr:
        new[name] = self.data[name]
    for name, value in izip(names, values):
        new[name] = value
    if inplace:
        # Swap in the rebuilt metadata array and register the new names.
        self.data = new
        self._extra_names = self._extra_names.union(names)
    else:
        return Features.from_data(new)
############################################################################
### Transforming the features

def _replace_bags(self, bags, n_pts=None, inplace=False):
    '''
    Replace the feature bags with new ones, keeping all other metadata.
    If n_pts is None, `bags` is a list of per-bag arrays which get
    stacked; otherwise `bags` is already a stacked 2-d array whose rows
    are partitioned according to n_pts.
    If inplace, mutates this object's internal arrays (in a fixed order:
    counts, boundaries, features, then derived views); otherwise returns
    a new instance of the same class sharing the metadata columns.
    '''
    if n_pts is None:
        n_pts = [b.shape[0] for b in bags]
        bags = np.vstack(bags)
    else:
        bags = np.asarray(bags)
    assert bags.ndim == 2
    if inplace:
        self._n_pts = np.asarray(n_pts)
        # Boundaries are the cumulative offsets of each bag in the stack.
        self._boundaries = np.hstack([[0], np.cumsum(self._n_pts)])
        self._features = bags
        self._refresh_features()
    else:
        return self.__class__(
            bags, n_pts=n_pts, categories=self.categories, names=self.names,
            **dict((k, self.data[k]) for k in self._extra_names))
def _apply_transform(self, transformer, fit_first, inplace=False,
                     dtype=None):
    '''
    Transforms the features using an sklearn-style transformer object that
    should be fit to the full, stacked feature matrix. Assumes that the
    transformer supports the "copy" attribute, and that it does not change
    the number or order of points (though it may change their
    dimensionality).
    transformer: the transformer object
    fit_first: whether to fit the transformer to the objects first
    dtype: fit to the features.astype(dtype) if not None
    By default, returns a new Features instance.
    If inplace is passed, modifies this instance; doesn't return anything.
    '''
    # When working in place, let the transformer modify the matrix directly.
    transformer.copy = not inplace
    feats = (self._features if dtype is None
             else self._features.astype(dtype))
    transform = transformer.fit_transform if fit_first else transformer.transform
    return self._replace_bags(
        transform(feats), n_pts=self._n_pts, inplace=inplace)
def pca(self, pca=None, unfit_pca=None,
        k=None, varfrac=DEFAULT_VARFRAC, randomize=False, whiten=False,
        dtype=None,
        ret_pca=False, inplace=False):
    '''
    Runs the features through principal components analysis to reduce their
    dimensionality.
    By default, returns a new Features instance.
    If inplace is passed, modifies this instance; doesn't return anything.
    If ret_pca is passed: returns the PCA object as well as whatever else
    it would have returned.
    If `pca` is passed, uses that pre-fit PCA object to transform. This is
    useful for transforming test objects consistently with training objects.
    Otherwise, if `unfit_pca` is passed, that object's fit_transform()
    method is called to fit the samples and transform them.
    Otherwise, the following options specify which type of PCA to perform:
        k: a dimensionality to reduce to. Default: use varfrac instead.
        varfrac: the fraction of variance to preserve. Overridden by k.
                 Default: 0.7. Can't be used for randomized or sparse PCA.
        randomize: use a randomized PCA implementation. Default: no.
        whiten: whether to whiten the inputs, removing linear correlations
                across features
        dtype: the dtype of the feature matrix to use.
    '''
    # Figure out which PCA instance we should use and whether to fit it.
    if pca is not None:
        fit_first = False
    elif unfit_pca is not None:
        pca = unfit_pca
        fit_first = True
    else:
        fit_first = True
        if k is None:
            if randomize:
                raise ValueError("can't randomize without a specific k")
            from sklearn.decomposition import PCA
            pca = PCA(varfrac, whiten=whiten)
        elif randomize:
            # Imported lazily: RandomizedPCA was removed in scikit-learn
            # 0.20, so an eager import would break even when randomize is
            # not requested.
            from sklearn.decomposition import RandomizedPCA
            pca = RandomizedPCA(k, whiten=whiten)
        else:
            from sklearn.decomposition import PCA
            pca = PCA(k, whiten=whiten)
    # BUGFIX: dtype was documented but previously never forwarded, so it
    # was silently ignored; pass it through to the transform.
    r = self._apply_transform(pca, fit_first=fit_first, inplace=inplace,
                              dtype=dtype)
    if ret_pca:
        return pca if inplace else (r, pca)
    else:
        return r
def standardize(self, scaler=None, ret_scaler=False, inplace=False,
                cast_dtype=np.float32):
    '''
    Standardizes the features so that each dimension has zero mean and unit
    variance.
    By default, returns a new Features instance.
    If inplace is passed, modifies this instance; doesn't return anything.
    If ret_scaler is passed: returns the scaler object as well as whatever
    else it would have returned.
    If cast_dtype is not None, casts non-float data arrays to this dtype
    first.
    If `scaler` is passed, uses that pre-fit scaler to transform. This is
    useful for transforming test objects consistently with training objects.
    '''
    if scaler is None:
        # No pre-fit scaler supplied: build a fresh one and fit it here.
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler()
        needs_fit = True
    else:
        needs_fit = False
    kwargs = {'fit_first': needs_fit, 'inplace': inplace}
    if self._features.dtype.kind != 'f':
        kwargs['dtype'] = cast_dtype
    result = self._apply_transform(scaler, **kwargs)
    if not ret_scaler:
        return result
    return scaler if inplace else (result, scaler)
def normalize(self, norm='l2', inplace=False, cast_dtype=np.float32):
    '''
    Normalizes the features so that each vector has unit norm (l1 or l2).
    By default, returns a new Features instance.
    If inplace is passed, modifies this instance; doesn't return anything.
    If cast_dtype is not None, casts non-float data arrays to this dtype
    first.
    norm: 'l2' (default) or 'l1'.
    This transformation is stateless, so unlike pca() or standardize()
    there's no point in returning the normalizer object.
    '''
    from sklearn.preprocessing import Normalizer
    # Only cast when the stored features aren't already floating-point.
    cast_to = cast_dtype if self.dtype.kind != 'f' else None
    return self._apply_transform(Normalizer(norm), fit_first=False,
                                 inplace=inplace, dtype=cast_to)
def condense_kmeans(self, n_clusters, max_iter=20, inplace=False,
                    progressbar=False, cast_dtype=np.float32,
                    library='vlfeat', algorithm='lloyd'):
    '''
    Condenses the number of points in a sample set through k-means:
    each bag with more than n_clusters points is replaced by its cluster
    centers; smaller bags are kept as-is.
    n_clusters: the number of centers per condensed bag
    max_iter: maximum k-means iterations per bag
    inplace: mutate this object instead of returning a new one
    progressbar: show a progress bar over the bags
    cast_dtype: cast non-float features to this dtype first
    library: 'vlfeat' or 'sklearn'
    algorithm: passed to the chosen backend
    '''
    feats_iter = iter(self.features)
    if self.dtype.kind != 'f':
        # k-means backends want float data.
        feats_iter = (np.asarray(b, dtype=cast_dtype) for b in feats_iter)
    if progressbar:
        from .mp_utils import progress
        feats_iter = progress(maxval=len(self))(feats_iter)
    if library == 'vlfeat':
        fn = self._condense_kmeans_vlfeat
    elif library == 'sklearn':
        fn = self._condense_kmeans_sklearn
    else:
        # BUGFIX: an unknown library previously fell through to a
        # confusing UnboundLocalError on `fn`.
        raise ValueError("unknown library {!r}; expected 'vlfeat' or "
                         "'sklearn'".format(library))
    do = fn(n_clusters=n_clusters, max_iter=max_iter, algorithm=algorithm)
    new_bags = [bag if bag.shape[0] <= n_clusters else do(bag)
                for bag in feats_iter]
    return self._replace_bags(new_bags, inplace=inplace)
def _condense_kmeans_vlfeat(self, n_clusters, max_iter=20,
                            algorithm='lloyd'):
    '''
    Returns a callable that condenses one bag to n_clusters centers using
    vlfeat's k-means (random init, a single repetition).
    '''
    from vlfeat import vl_kmeans
    return partial(vl_kmeans, num_centers=n_clusters, algorithm=algorithm,
                   max_iter=max_iter, num_rep=1, initialization='random')
def _condense_kmeans_sklearn(self, n_clusters, max_iter=20,
                             algorithm='minibatch'):
    '''
    Returns a callable that condenses one bag to n_clusters centers using
    scikit-learn's k-means.
    algorithm: 'minibatch' for MiniBatchKMeans, or 'batch'/'lloyd' for
    the standard KMeans.
    '''
    if algorithm == 'minibatch':
        from sklearn.cluster import MiniBatchKMeans
        cls = partial(MiniBatchKMeans, compute_labels=False)
    elif algorithm in ('batch', 'lloyd'):
        from sklearn.cluster import KMeans
        cls = partial(KMeans, n_init=1, n_jobs=1)
        # most of the work is parallelized by MKL. still, not super fast.
    else:
        # BUGFIX: an unknown algorithm previously fell through to a
        # confusing UnboundLocalError on `cls`.
        raise ValueError("unknown algorithm {!r}; expected 'minibatch', "
                         "'batch', or 'lloyd'".format(algorithm))
    kmeans = cls(n_clusters=n_clusters, init='random', max_iter=max_iter)
    def do(bag):
        # Fit on one bag and hand back its centers.
        kmeans.fit(bag)
        return kmeans.cluster_centers_
    return do
def bag_of_words(self, n_codewords, max_iter=100, num_rep=1,
cast_dtype=np.float32,
library='vlfeat', algorithm='lloyd'):
'''
Transforms each bag into a single vector with the bag of words
representation:
1. Run k-means (with n_codewords | |
bigquery_options is not None:
pulumi.set(__self__, "bigquery_options", bigquery_options)
if description is not None:
pulumi.set(__self__, "description", description)
if destination is not None:
pulumi.set(__self__, "destination", destination)
if disabled is not None:
pulumi.set(__self__, "disabled", disabled)
if exclusions is not None:
pulumi.set(__self__, "exclusions", exclusions)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if unique_writer_identity is not None:
pulumi.set(__self__, "unique_writer_identity", unique_writer_identity)
if writer_identity is not None:
pulumi.set(__self__, "writer_identity", writer_identity)
@property
@pulumi.getter(name="bigqueryOptions")
def bigquery_options(self) -> Optional[pulumi.Input['ProjectSinkBigqueryOptionsArgs']]:
    """
    Options that affect sinks exporting data to BigQuery. Structure documented below.
    """
    return pulumi.get(self, "bigquery_options")

@bigquery_options.setter
def bigquery_options(self, value: Optional[pulumi.Input['ProjectSinkBigqueryOptionsArgs']]):
    pulumi.set(self, "bigquery_options", value)

@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
    """
    A description of this sink.
    """
    return pulumi.get(self, "description")

@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "description", value)

@property
@pulumi.getter
def destination(self) -> Optional[pulumi.Input[str]]:
    """
    The destination of the sink (or, in other words, where logs are written to). Can be a
    Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket . Examples:
    ```python
    import pulumi
    ```
    The writer associated with the sink must have access to write to the above resource.
    """
    return pulumi.get(self, "destination")

@destination.setter
def destination(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "destination", value)

@property
@pulumi.getter
def disabled(self) -> Optional[pulumi.Input[bool]]:
    """
    If set to True, then this sink is disabled and it does not export any log entries.
    """
    return pulumi.get(self, "disabled")

@disabled.setter
def disabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "disabled", value)

@property
@pulumi.getter
def exclusions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProjectSinkExclusionArgs']]]]:
    """
    Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is documented below.
    """
    return pulumi.get(self, "exclusions")

@exclusions.setter
def exclusions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ProjectSinkExclusionArgs']]]]):
    pulumi.set(self, "exclusions", value)

@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input[str]]:
    """
    An advanced logs filter that matches the log entries to be exported. By using the sample function, you can export less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
    write a filter.
    """
    return pulumi.get(self, "filter")

@filter.setter
def filter(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "filter", value)

@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    A client-assigned identifier, such as `my-sink`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", value)

@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
    """
    The ID of the project to create the sink in. If omitted, the project associated with the provider is
    used.
    """
    return pulumi.get(self, "project")

@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "project", value)

@property
@pulumi.getter(name="uniqueWriterIdentity")
def unique_writer_identity(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether or not to create a unique identity associated with this sink. If `false`
    (the default), then the `writer_identity` used is `serviceAccount:<EMAIL>`. If `true`,
    then a unique service account is created and used for this sink. If you wish to publish logs across projects or utilize
    `bigquery_options`, you must set `unique_writer_identity` to true.
    """
    return pulumi.get(self, "unique_writer_identity")

@unique_writer_identity.setter
def unique_writer_identity(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "unique_writer_identity", value)

@property
@pulumi.getter(name="writerIdentity")
def writer_identity(self) -> Optional[pulumi.Input[str]]:
    """
    The identity associated with this sink. This identity must be granted write access to the
    configured `destination`.
    """
    return pulumi.get(self, "writer_identity")

@writer_identity.setter
def writer_identity(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "writer_identity", value)
class ProjectSink(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             bigquery_options: Optional[pulumi.Input[pulumi.InputType['ProjectSinkBigqueryOptionsArgs']]] = None,
             description: Optional[pulumi.Input[str]] = None,
             destination: Optional[pulumi.Input[str]] = None,
             disabled: Optional[pulumi.Input[bool]] = None,
             exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectSinkExclusionArgs']]]]] = None,
             filter: Optional[pulumi.Input[str]] = None,
             name: Optional[pulumi.Input[str]] = None,
             project: Optional[pulumi.Input[str]] = None,
             unique_writer_identity: Optional[pulumi.Input[bool]] = None,
             __props__=None):
    """
    ## Import
    Project-level logging sinks can be imported using their URI, e.g.
    ```sh
    $ pulumi import gcp:logging/projectSink:ProjectSink my_sink projects/my-project/sinks/my-sink
    ```
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[pulumi.InputType['ProjectSinkBigqueryOptionsArgs']] bigquery_options: Options that affect sinks exporting data to BigQuery. Structure documented below.
    :param pulumi.Input[str] description: A description of this sink.
    :param pulumi.Input[str] destination: The destination of the sink (or, in other words, where logs are written to). Can be a
           Cloud Storage bucket, a PubSub topic, a BigQuery dataset or a Cloud Logging bucket . Examples:
           ```python
           import pulumi
           ```
           The writer associated with the sink must have access to write to the above resource.
    :param pulumi.Input[bool] disabled: If set to True, then this sink is disabled and it does not export any log entries.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectSinkExclusionArgs']]]] exclusions: Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is documented below.
    :param pulumi.Input[str] filter: An advanced logs filter that matches the log entries to be exported. By using the sample function, you can export less than 100% of the matching log entries. See [Advanced Log Filters](https://cloud.google.com/logging/docs/view/advanced_filters) for information on how to
           write a filter.
    :param pulumi.Input[str] name: A client-assigned identifier, such as `my-sink`. Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.
    :param pulumi.Input[str] project: The ID of the project to create the sink in. If omitted, the project associated with the provider is
           used.
    :param pulumi.Input[bool] unique_writer_identity: Whether or not to create a unique identity associated with this sink. If `false`
           (the default), then the `writer_identity` used is `serviceAccount:<EMAIL>`. If `true`,
           then a unique service account is created and used for this sink. If you wish to publish logs across projects or utilize
           `bigquery_options`, you must set `unique_writer_identity` to true.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: ProjectSinkArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    ## Import
    Project-level logging sinks can be imported using their URI, e.g.
    ```sh
    $ pulumi import gcp:logging/projectSink:ProjectSink my_sink projects/my-project/sinks/my-sink
    ```
    :param str resource_name: The name of the resource.
    :param ProjectSinkArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Typing-only overload: the real implementation is the un-decorated
    # __init__ below, which dispatches to _internal_init.
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Dispatch between the two @overload signatures above: either a single
    # ProjectSinkArgs bundle, or individual keyword properties.
    resource_args, opts = _utilities.get_resource_args_opts(ProjectSinkArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        # Args-object form: expand its fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        # Keyword form: forward everything unchanged.
        __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   bigquery_options: Optional[pulumi.Input[pulumi.InputType['ProjectSinkBigqueryOptionsArgs']]] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   destination: Optional[pulumi.Input[str]] = None,
                   disabled: Optional[pulumi.Input[bool]] = None,
                   exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectSinkExclusionArgs']]]]] = None,
                   filter: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   unique_writer_identity: Optional[pulumi.Input[bool]] = None,
                   __props__=None):
    """
    Shared initializer used by both __init__ overloads.  Validates the
    resource options, builds the property bag, and registers the
    resource with the Pulumi engine.

    :raises TypeError: if opts is not a ResourceOptions, or if __props__
        is supplied without opts.id (a lookup of an existing resource).
    """
    # normalize opts: default, type-check, and fill in the SDK version
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # creating a new resource: assemble the property bag from the
        # keyword arguments (bypassing ProjectSinkArgs.__init__)
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ProjectSinkArgs.__new__(ProjectSinkArgs)
        __props__.__dict__["bigquery_options"] = bigquery_options
        __props__.__dict__["description"] = description
        # destination is the only required input (unless looking up by URN)
        if destination is None and not opts.urn:
            raise TypeError("Missing required property 'destination'")
        __props__.__dict__["destination"] = destination
        __props__.__dict__["disabled"] = disabled
        __props__.__dict__["exclusions"] = exclusions
        __props__.__dict__["filter"] = filter
        __props__.__dict__["name"] = name
        __props__.__dict__["project"] = project
        __props__.__dict__["unique_writer_identity"] = unique_writer_identity
        # writer_identity is an output-only property
        __props__.__dict__["writer_identity"] = None
    super(ProjectSink, __self__).__init__(
        'gcp:logging/projectSink:ProjectSink',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bigquery_options: Optional[pulumi.Input[pulumi.InputType['ProjectSinkBigqueryOptionsArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
destination: Optional[pulumi.Input[str]] = None,
disabled: Optional[pulumi.Input[bool]] = None,
exclusions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ProjectSinkExclusionArgs']]]]] = None,
filter: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
unique_writer_identity: Optional[pulumi.Input[bool]] = None,
writer_identity: Optional[pulumi.Input[str]] = None) -> 'ProjectSink':
"""
Get an existing ProjectSink resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ProjectSinkBigqueryOptionsArgs']] bigquery_options: Options that affect | |
# Repository: NCAR/lrose-uw-general
#!/usr/bin/env python
#===========================================================================
#
# Produce plots for ZDR bias by volume - paper
#
#===========================================================================
import os
import sys
import subprocess
from optparse import OptionParser
import numpy as np
from numpy import convolve
from numpy import linalg, array, ones
import matplotlib.pyplot as plt
from matplotlib import dates
import math
import datetime
import contextlib
def main():
    """
    Parse the command line, read the ZDR-bias table, prepare the global
    plotting arrays, and render the time-series plot.

    Exits with status 0 on success, -1 if the header line cannot be read.
    """

    # globals shared with the other routines in this script
    global options
    global debug
    global startTime
    global endTime

    # parse the command line
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option('--debug',
                      dest='debug', default=False,
                      action="store_true",
                      help='Set debugging on')
    parser.add_option('--verbose',
                      dest='verbose', default=False,
                      action="store_true",
                      help='Set verbose debugging on')
    parser.add_option('--bias_file',
                      dest='biasFilePath',
                      default='../data/olympex/zdr_bias.npol.txt',
                      help='File path for bias results')
    parser.add_option('--title',
                      dest='title',
                      default='NPOL ZDR BIAS FROM ICE',
                      help='Title for plot')
    parser.add_option('--width',
                      dest='figWidthMm',
                      default=400,
                      help='Width of figure in mm')
    parser.add_option('--height',
                      dest='figHeightMm',
                      default=200,
                      help='Height of figure in mm')
    parser.add_option('--lenMean',
                      dest='lenMean',
                      default=1,
                      help='Len of moving mean filter')
    parser.add_option('--start',
                      dest='startTime',
                      default='2015 11 05 00 00 00',
                      help='Start time for XY plot')
    parser.add_option('--end',
                      dest='endTime',
                      default='2016 01 15 00 00 00',
                      help='End time for XY plot')
    (options, args) = parser.parse_args()

    if options.verbose:
        options.debug = True

    # decode start/end times, given as 'YYYY MM DD HH MM SS'
    year, month, day, hour, minute, sec = options.startTime.split()
    startTime = datetime.datetime(int(year), int(month), int(day),
                                  int(hour), int(minute), int(sec))
    year, month, day, hour, minute, sec = options.endTime.split()
    endTime = datetime.datetime(int(year), int(month), int(day),
                                int(hour), int(minute), int(sec))

    if options.debug:
        # Python-2-only 'print >>sys.stderr' statements replaced with
        # writes that are valid under both Python 2 and Python 3.
        sys.stderr.write("Running %prog\n")
        sys.stderr.write("  biasFilePath: %s\n" % options.biasFilePath)
        sys.stderr.write("  startTime: %s\n" % startTime)
        sys.stderr.write("  endTime: %s\n" % endTime)

    # read in column headers for bias results
    iret, biasHdrs, biasData = readColumnHeaders(options.biasFilePath)
    if iret != 0:
        sys.exit(-1)

    # read in data for bias results
    biasData, biasTimes = readInputData(options.biasFilePath, biasHdrs, biasData)

    # prepare the data for plotting
    prepareData(biasData, biasTimes)

    # render the plot
    doPlot()

    # done
    sys.exit(0)
########################################################################
# Read columm headers for the data
# this is in the first line
def readColumnHeaders(filePath):
    """
    Read the column names from the first line of *filePath*.

    The first line must start with '#' followed by whitespace-separated
    column names.

    Returns (status, colHeaders, colData): status is 0 on success and -1
    if the first line is not a header; colData maps each header to an
    empty list ready to be filled by readInputData().
    """
    colHeaders = []
    colData = {}

    # only the first line is needed; close the file promptly
    with open(filePath, 'r') as fp:
        line = fp.readline()

    commentIndex = line.find("#")
    if commentIndex == 0:
        # header line: strip the leading '#' and split on whitespace
        colHeaders = line.lstrip("# ").rstrip("\n").split()
        if options.debug:
            sys.stderr.write("Reading file: %s\n" % filePath)
            for icol, var in enumerate(colHeaders):
                sys.stderr.write("colHeader[%d] = %s\n" % (icol, colHeaders[icol]))
    else:
        sys.stderr.write("ERROR - readColumnHeaders\n")
        sys.stderr.write("  First line does not start with #\n")
        return -1, colHeaders, colData

    # one empty data list per column
    for var in colHeaders:
        colData[var] = []

    return 0, colHeaders, colData
########################################################################
# Read in the data
def readInputData(filePath, colHeaders, colData):
    """
    Read the whitespace-delimited data rows of *filePath* into colData.

    Lines containing '#' are skipped, as are lines whose column count
    does not match the header.  Count and date/time columns are parsed
    as int, everything else as float.

    Returns (colData, obsTimes) where obsTimes is a list of
    datetime.datetime objects built from the year/month/day/hour/min/sec
    columns.
    """
    intFields = ('count', 'year', 'month', 'day',
                 'hour', 'min', 'sec', 'unix_time')

    with open(filePath, 'r') as fp:
        lines = fp.readlines()

    # read in a line at a time, set colData
    for line in lines:
        if line.find("#") >= 0:
            continue
        data = line.strip().split()
        if len(data) != len(colHeaders):
            if options.debug:
                sys.stderr.write("skipping line: %s\n" % line)
            continue
        for index, var in enumerate(colHeaders):
            if var in intFields:
                colData[var].append(int(data[index]))
            else:
                colData[var].append(float(data[index]))

    # load observation times array
    year = colData['year']
    month = colData['month']
    day = colData['day']
    hour = colData['hour']
    minute = colData['min']
    sec = colData['sec']
    obsTimes = []
    for ii in range(len(year)):
        obsTimes.append(datetime.datetime(year[ii], month[ii], day[ii],
                                          hour[ii], minute[ii], sec[ii]))

    if options.verbose:
        sys.stderr.write("Read in file: %s\n" % filePath)
        for itime, obsTime in enumerate(obsTimes):
            sys.stdout.write('===>> ')
            sys.stdout.write(str(obsTime))
            sys.stdout.write(': ')
            for icol, var in enumerate(colHeaders):
                sys.stdout.write(colHeaders[icol] + ':')
                sys.stdout.write(str(colData[var][itime]))
                sys.stdout.write(' ')
            sys.stdout.write('\n')

    return colData, obsTimes
########################################################################
# Moving average filter
def movingAverage(values, window):
    """
    Return *values* smoothed with a centered boxcar of length *window*.

    A window shorter than 2 is a no-op: the input is returned unchanged.
    The result has the same length as the input ('same' convolution).
    """
    if window < 2:
        return values
    kernel = np.repeat(1.0, window) / window
    return np.convolve(values, kernel, 'same')
########################################################################
# Prepare data sets for plotting
def prepareData(biasData, biasTimes):
    """
    Build the global arrays used by doPlot() from the bias table.

    Smooths the ice and Bragg ZDR-bias columns with a moving average and
    extracts their finite samples into global (time, value) arrays.

    NOTE(review): the bare 'return' below makes the site-temperature
    analysis that follows unreachable; it is retained for reference.
    The Python-2-only print statements in that section were converted to
    sys.stderr.write so the file compiles under Python 3.
    """
    lenMeanFilter = int(options.lenMean)

    # set up arrays for ZDR bias
    global btimes
    btimes = np.array(biasTimes).astype(datetime.datetime)

    biasIce = np.array(biasData["ZdrInIcePerc25.00"]).astype(np.double)
    biasIce = movingAverage(biasIce, lenMeanFilter)
    validIce = (np.isfinite(biasIce) & (btimes >= startTime) & (btimes <= endTime))

    biasBragg = np.array(biasData["ZdrInBraggPerc32.00"]).astype(np.double)
    biasBragg = movingAverage(biasBragg, lenMeanFilter)
    validBragg = (np.isfinite(biasBragg) & (btimes >= startTime) & (btimes <= endTime))

    # NOTE(review): these overwrite the masks above and drop the
    # start/end time-window constraint — confirm which is intended.
    validIce = np.isfinite(biasIce)
    validBragg = np.isfinite(biasBragg)

    global validIceBtimes, validIceVals
    validIceBtimes = btimes[validIce]
    validIceVals = biasIce[validIce]

    global validBraggBtimes, validBraggVals
    validBraggBtimes = btimes[validBragg]
    validBraggVals = biasBragg[validBragg]

    # load up receiver gain etc - axis 4
    (dailyTimeIce, dailyValIce) = computeDailyStats(validIceBtimes, validIceVals)
    (dailyTimeBragg, dailyValBragg) = computeDailyStats(validBraggBtimes, validBraggVals)

    return

    # ---------- unreachable below: kept for reference ----------

    # site temp
    global tempSite, validTempSite
    tempSite = np.array(biasData["TempSite"]).astype(np.double)
    validTempSite = np.isfinite(tempSite)

    # ZDR bias vs temp
    global tempTimes, tempIceVals, tempIceBias
    tempTimes = []
    tempIceVals = []
    tempIceBias = []
    for ii, biasVal in enumerate(validIceVals):
        btime = validIceBtimes[ii]
        tempTime, tempVal = getClosestTemp(btime, btimes, tempSite)
        tempTimes.append(tempTime)
        tempIceVals.append(tempVal)
        tempIceBias.append(biasVal)
        if options.verbose:
            sys.stderr.write(
                "==>> biasTime, biasVal, tempTime, tempVal: %s %s %s %s\n" %
                (btime, biasVal, tempTime, tempVal))

    global tempMean, tempSdev, tempNorm
    tempMean = np.mean(tempSite)
    tempSdev = np.std(tempSite)
    if options.debug:
        sys.stderr.write("==>> tempMean, tempSdev: %s %s\n" % (tempMean, tempSdev))
    tempNorm = (tempSite - tempMean) / (tempSdev * 10.0)

    # linear regression for bias vs temp
    # obtain the fit, ww[0] is slope, ww[1] is intercept
    global AA, ww, tempRegrX, tempRegrY, minTemp, maxTemp
    AA = array([tempIceVals, ones(len(tempIceVals))])
    ww = linalg.lstsq(AA.T, tempIceBias)[0]
    minTemp = min(tempIceVals)
    maxTemp = max(tempIceVals)
    tempRegrX = []
    tempRegrY = []
    tempRegrX.append(minTemp)
    tempRegrX.append(maxTemp)
    tempRegrY.append(ww[0] * minTemp + ww[1])
    tempRegrY.append(ww[0] * maxTemp + ww[1])

    # correct bias for linear regression
    slope = ww[0]
    intercept = ww[1]
    global tempCorrBias
    tempCorrBias = []
    for ii, rtime in enumerate(tempTimes):
        tempC = tempIceVals[ii]
        biasDb = tempIceBias[ii]
        tempCorr = intercept + tempC * slope
        corrBias = biasDb - tempCorr
        tempCorrBias.append(corrBias)
########################################################################
# Plot
def doPlot():
    """
    Render figure 1 (ZDR bias in ice and Bragg vs time) from the globals
    prepared by prepareData(), then show it interactively.

    Figure 2 (bias vs site temperature) is created but its plotting calls
    are commented out below, matching the paper-figure configuration.
    """
    fileName = options.biasFilePath
    titleStr = "File: " + fileName
    hfmt = dates.DateFormatter('%y/%m/%d')
    lenMeanFilter = int(options.lenMean)

    # set up plots; figure sizes come from the command line in mm
    widthIn = float(options.figWidthMm) / 25.4
    htIn = float(options.figHeightMm) / 25.4

    fig1 = plt.figure(1, (widthIn, htIn))
    ax1a = fig1.add_subplot(1,1,1,xmargin=0.0)
    fig2 = plt.figure(2, (widthIn/2, htIn))
    ax2a = fig2.add_subplot(1,1,1,xmargin=1.0, ymargin=1.0)

    # pad the x axis by one day on either side of the requested window
    oneDay = datetime.timedelta(1.0)
    timeRange = endTime - startTime
    timeRangeMargin = timeRange / 50
    #ax1a.set_xlim(startTime - timeRangeMargin, endTime + timeRangeMargin)
    ax1a.set_xlim(startTime - oneDay, endTime + oneDay)
    title = "NPOL ZDR bias in ice"
    ax1a.set_title(title)

    # scatter the Bragg and ice bias estimates vs time
    ax1a.plot(validBraggBtimes, validBraggVals, \
              "o", label = 'ZDR Bias In Bragg', color='blue')
    #ax1a.plot(validBraggBtimes, validBraggVals, \
    #          label = 'ZDR Bias In Bragg', linewidth=1, color='blue')

    ax1a.plot(validIceBtimes, validIceVals, \
              "o", label = 'ZDR Bias In Ice', color='green')
    #ax1a.plot(validIceBtimes, validIceVals, \
    #          label = 'ZDR Bias In Ice', linewidth=1, color='green')

    #ax1a.plot(tempTimes, tempNorm, \
    #          label = 'Norm-temps', color='orange', linewidth=1)
    #ax1a.plot(btimes, tempNorm, \
    #          label = 'Norm-temps', color='orange', linewidth=1)

    # -9999 sentinels leave the y axis autoscaled
    configDateAxis(ax1a, -9999, 9999, "ZDR Bias (dB)", 'upper right')

    # ZDR vs temp (disabled for the paper figure)
    # label2a = "NPOL ZDR Bias In Ice = " + ("%.5f" % ww[0]) + " * temp + " + ("%.3f" % ww[1])
    # ax2a.plot(tempIceVals, tempIceBias,
    #          "x", label = label2a, color = 'blue')
    # ax2a.plot(tempRegrX, tempRegrY, linewidth=3, color = 'blue')
    # legend2a = ax2a.legend(loc="upper left", ncol=4)
    # for label2a in legend2a.get_texts():
    #    label2a.set_fontsize(12)
    # ax2a.set_xlabel("Site temperature (C)")
    # ax2a.set_ylabel("ZDR Bias (dB)")
    # ax2a.grid(True)
    # ax2a.set_ylim([-0.5, 0.5])
    # ax2a.set_xlim([minTemp - 1, maxTemp + 1])
    # title2a = "NPOL ZDR Bias In Ice Vs Temp\n" + str(startTime) + " - " + str(endTime)
    #title2a = "NPOL ZDR Bias In Ice Vs Temp"
    # ax2a.set_title(title2a)

    fig1.autofmt_xdate()
    fig1.tight_layout()
    fig1.subplots_adjust(bottom=0.08, left=0.06, right=0.97, top=0.96)
    plt.show()
########################################################################
# initialize legends etc
def configDateAxis(ax, miny, maxy, ylabel, legendLoc):
    """
    Configure legend, axis labels, grid, y-limits, and date formatting
    for a time-series axis.

    A miny/maxy of -9999 acts as a sentinel and leaves the y axis
    autoscaled.
    """
    legend = ax.legend(loc=legendLoc, ncol=4)
    for label in legend.get_texts():
        label.set_fontsize('x-small')
    ax.set_xlabel("Date")
    ax.set_ylabel(ylabel)
    ax.grid(True)
    if miny > -9990 and maxy > -9990:
        ax.set_ylim([miny, maxy])
    hfmt = dates.DateFormatter('%y/%m/%d')
    #hfmt = dates.DateFormatter('%y/%m/%d-%H:%M:%S')
    ax.xaxis.set_major_locator(dates.DayLocator())
    #ax.xaxis.set_major_locator(dates.HourLocator())
    ax.xaxis.set_major_formatter(hfmt)
    # Tick.label was deprecated and removed in Matplotlib 3.8;
    # tick_params is the supported way to set tick-label font size.
    ax.tick_params(axis='x', labelsize=8)
########################################################################
# get temp closest in time to the search time
def getClosestTemp(biasTime, tempTimes, obsTemps):
    """
    Return (time, temp) of the finite temperature observation closest in
    time to *biasTime*, searching only within +/- 2 hours.

    tempTimes and obsTemps are parallel numpy arrays.  Returns
    (biasTime, NaN) when no candidate exists in the window.
    """
    twoHours = datetime.timedelta(0.0, 7200.0)
    # boolean mask of observations inside the +/- 2 hour window
    validTimes = ((tempTimes > (biasTime - twoHours)) & \
                  (tempTimes < (biasTime + twoHours)))
    # NOTE(review): len(validTimes) is the mask length, not the count of
    # True entries; the searchTimes length check below is what actually
    # guards the empty-window case.
    if (len(validTimes) < 1):
        return (biasTime, float('NaN'))
    searchTimes = tempTimes[validTimes]
    searchTemps = obsTemps[validTimes]
    if (len(searchTimes) < 1 or len(searchTemps) < 1):
        return (biasTime, float('NaN'))
    # linear scan for the finite temperature with the smallest |dt|
    minDeltaTime = 1.0e99
    ttime = searchTimes[0]
    # NOTE(review): seed may be NaN if the first in-window sample is NaN
    # and no finite sample follows.
    temp = searchTemps[0]
    for ii, temptime in enumerate(searchTimes, start=0):
        if (np.isfinite(searchTemps[ii])):
            ttemp = searchTemps[ii]
            deltaTime = math.fabs((temptime - biasTime).total_seconds())
            if (deltaTime < minDeltaTime):
                minDeltaTime = deltaTime
                temp = ttemp
                ttime = temptime
    return (ttime, temp)
########################################################################
# get tx power | |
# deafrica_classificationtools.py
'''
Description: This file contains a set of python functions for conducting
machine learning classification on remote sensing data from Digital Earth
Africa's Open Data Cube
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Africa data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one on
Github https://github.com/digitalearthafrica/deafrica-sandbox-notebooks/issues
Last modified: September 2020
'''
import os
import sys
import joblib
import datacube
import rasterio
import numpy as np
import xarray as xr
from tqdm import tqdm
import dask.array as da
import geopandas as gpd
from copy import deepcopy
import multiprocessing as mp
import dask.distributed as dd
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from sklearn.cluster import KMeans
from sklearn.base import clone
from datacube.utils import masking
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from abc import ABCMeta, abstractmethod
from datacube.utils import geometry
from sklearn.base import ClusterMixin
from dask.diagnostics import ProgressBar
from rasterio.features import rasterize
from sklearn.impute import SimpleImputer
from rasterio.features import geometry_mask
from dask_ml.wrappers import ParallelPostFit
from sklearn.mixture import GaussianMixture
from datacube.utils.geometry import assign_crs
from datacube_stats.statistics import GeoMedian
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import BaseCrossValidator
import warnings
from dea_tools.spatial import xr_rasterize
from dea_tools.bandindices import calculate_indices
from dea_tools.datahandling import load_ard, mostcommon_crs
def sklearn_flatten(input_xr):
    """
    Collapse the spatial (and optional temporal) dimensions of an
    xarray object into a single leading axis, dropping every pixel
    that has a NaN in any band, so the result can be fed directly to
    sklearn's fit/predict methods.

    Parameters
    ----------
    input_xr : xarray.DataArray or xarray.Dataset
        Must have dimensions 'x' and 'y', may have dimension 'time'.
        Dimensions other than 'x', 'y' and 'time' are unaffected by
        the flattening.

    Returns
    ----------
    input_np : numpy.array
        input_xr's data with 'x', 'y' (and 'time') flattened into the
        first axis; contains no NaNs.
    """
    # Datasets become DataArrays so there is a single underlying array
    if isinstance(input_xr, xr.Dataset):
        input_xr = input_xr.to_array()

    # stack across pixel dimensions, handling timeseries if necessary
    pixel_dims = ['x', 'y', 'time'] if 'time' in input_xr.dims else ['x', 'y']
    stacked = input_xr.stack(z=pixel_dims)

    # any remaining ('band'-like) dimensions are kept per-pixel, since
    # sklearn treats them as features
    band_dims = [dim for dim in stacked.dims if dim != 'z']

    # mask pixels with a NaN in *any* band — sklearn rejects NaNs
    nan_mask = np.isnan(stacked)
    if band_dims:
        nan_mask = nan_mask.any(dim=band_dims)
    # use the raw numpy mask (boolean indexing with xarrays acts weird)
    nan_mask = nan_mask.data

    # 'z' must be the leading axis for the boolean indexing below
    stacked = stacked.transpose('z', *band_dims)
    return stacked.data[~nan_mask]
def sklearn_unflatten(output_np, input_xr):
    """
    Reshape a flat, NaN-free numpy array (e.g. an sklearn prediction)
    back onto the spatiotemporal grid of the DataArray/Dataset it was
    derived from, masking the pixels that were dropped as NaN.

    Parameters
    ----------
    output_np : numpy.array
        First dimension must match the number of valid (non-NaN)
        pixels in input_xr.
    input_xr : xarray.DataArray or xarray.Dataset
        Must have dimensions 'x' and 'y', may have dimension 'time'.

    Returns
    ----------
    output_xr : xarray.DataArray
        Same 'x', 'y' (and 'time') dimensions and valid pixels as
        input_xr, filled with the values of output_np.
    """
    # cast input Datasets to DataArray
    if isinstance(input_xr, xr.Dataset):
        input_xr = input_xr.to_array()

    # rebuild the same mask used to flatten the input
    pixel_dims = ['x', 'y', 'time'] if 'time' in input_xr.dims else ['x', 'y']
    stacked = input_xr.stack(z=pixel_dims)

    band_dims = [dim for dim in stacked.dims if dim != 'z']
    nan_mask = np.isnan(stacked)
    if band_dims:
        nan_mask = nan_mask.any(dim=band_dims)

    # handle multivariable output (e.g. per-class probabilities)
    extra_shape = output_np.shape[1:] if len(output_np.shape[1:]) else ()

    # scatter the predictions back to their pixels; dropped pixels masked
    restored = np.ma.empty((len(stacked.z), *extra_shape))
    restored[~nan_mask] = output_np
    restored[nan_mask] = np.ma.masked

    # reattach the stacked coordinate and unstack to the original grid
    dim_names = ['z'] + ['output_dim_' + str(idx)
                         for idx in range(len(extra_shape))]
    output_xr = xr.DataArray(restored,
                             coords={'z': stacked['z']},
                             dims=dim_names)
    return output_xr.unstack()
def fit_xr(model, input_xr):
    """
    Fit a vanilla sklearn model on the valid pixels of an xarray object.

    Parameters
    ----------
    model : scikit-learn model or compatible object
        Must have a fit() method that takes numpy arrays.
    input_xr : xarray.DataArray or xarray.Dataset
        Must have dimensions 'x' and 'y', may have dimension 'time'.

    Returns
    ----------
    model : the scikit-learn model fitted to the pixels of input_xr.
    """
    flattened = sklearn_flatten(input_xr)
    return model.fit(flattened)
def predict_xr(model,
input_xr,
chunk_size=None,
persist=False,
proba=False,
clean=False,
return_input=False):
"""
Using dask-ml ParallelPostfit(), runs the parallel
predict and predict_proba methods of sklearn
estimators. Useful for running predictions
on a larger-than-RAM datasets.
Last modified: September 2020
Parameters
----------
model : scikit-learn model or compatible object
Must have a .predict() method that takes numpy arrays.
input_xr : xarray.DataArray or xarray.Dataset.
Must have dimensions 'x' and 'y'
chunk_size : int
The dask chunk size to use on the flattened array. If this
is left as None, then the chunks size is inferred from the
.chunks method on the `input_xr`
persist : bool
If True, and proba=True, then 'input_xr' data will be
loaded into distributed memory. This will ensure data
is not loaded twice for the prediction of probabilities,
but this will only work if the data is not larger than
distributed RAM.
proba : bool
If True, predict probabilities
clean : bool
If True, remove Infs and NaNs from input and output arrays
return_input : bool
If True, then the data variables in the 'input_xr' dataset will
be appended to the output xarray dataset.
Returns
----------
output_xr : xarray.Dataset
An xarray.Dataset containing the prediction output from model.
if proba=True then dataset will also contain probabilites, and
if return_input=True then dataset will have the input feature layers.
Has the same spatiotemporal structure as input_xr.
"""
# if input_xr isn't dask, coerce it
dask = True
if not bool(input_xr.chunks):
dask = False
input_xr = input_xr.chunk({'x': len(input_xr.x), 'y': len(input_xr.y)})
#set chunk size if not supplied
if chunk_size is None:
chunk_size = int(input_xr.chunks['x'][0]) * \
int(input_xr.chunks['y'][0])
def _predict_func(model, input_xr, persist, proba, clean, return_input):
x, y, crs = input_xr.x, input_xr.y, input_xr.geobox.crs
input_data = []
for var_name in input_xr.data_vars:
input_data.append(input_xr[var_name])
input_data_flattened = []
for arr in input_data:
data = arr.data.flatten().rechunk(chunk_size)
input_data_flattened.append(data)
# reshape for prediction
input_data_flattened = da.array(input_data_flattened).transpose()
if clean == True:
input_data_flattened = da.where(da.isfinite(input_data_flattened),
input_data_flattened, 0)
if (proba == True) & (persist == True):
# persisting data so we don't require loading all the data twice
input_data_flattened = input_data_flattened.persist()
# apply the classification
print('predicting...')
out_class = model.predict(input_data_flattened)
# Mask out NaN or Inf values in results
if clean == True:
out_class = da.where(da.isfinite(out_class), out_class, 0)
# Reshape when writing out
out_class = out_class.reshape(len(y), len(x))
# stack back into xarray
output_xr = xr.DataArray(out_class,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr = output_xr.to_dataset(name='Predictions')
if proba == True:
print(" probabilities...")
out_proba = model.predict_proba(input_data_flattened)
# convert to %
out_proba = da.max(out_proba, axis=1) * 100.0
if clean == True:
out_proba = da.where(da.isfinite(out_proba), out_proba, 0)
out_proba = out_proba.reshape(len(y), len(x))
out_proba = xr.DataArray(out_proba,
coords={
"x": x,
"y": y
},
dims=["y", "x"])
output_xr['Probabilities'] = out_proba
if return_input == True:
print(" input features...")
# unflatten the input_data_flattened array and append
# to the output_xr containin the predictions
arr = input_xr.to_array()
stacked = arr.stack(z=['y', | |
= self.tetrahedronList[TN].nodes[1]
newNodes[1] = self.tetrahedronList[TN].nodes[2]
self.tetrahedronList[TN].nodes = newNodes
def finalize(self):
    """
    Build the element lists, boundary maps, and array representations,
    then accumulate the mesh quality statistics (hMax, hMin, sigmaMax,
    totalVolume) over all tetrahedra.
    """
    self.buildLists()
    #self.fixLocalNumbering()
    self.buildBoundaryMaps()
    self.buildArraysFromLists()
    # mesh-quality statistics
    self.hMax = 0.0
    self.hMin = 1.0e16
    self.sigmaMax = 0.0
    self.totalVolume = 0.0
    for element in self.tetrahedronList:
        element.computeGeometricInfo()
        if element.diameter > self.hMax:
            self.hMax = element.diameter
        if element.diameter < self.hMin:
            self.hMin = element.diameter
        # shape ratio: circumscribed over inscribed diameter
        shapeRatio = old_div(element.diameter, element.innerDiameter)
        if shapeRatio > self.sigmaMax:
            self.sigmaMax = shapeRatio
        self.totalVolume += element.volume
def buildLists(self):
    """
    Rebuild the sorted node/edge/triangle/tetrahedron lists from their
    dicts and set the dimension-independent aliases elementList and
    elementBoundaryList.
    """
    self.buildListsNodes()
    self.buildListsEdges()
    self.buildListsTriangles()
    self.buildListsTetrahedra()
    # generic aliases used by dimension-independent code
    self.elementList = self.tetrahedronList
    self.elementBoundaryList = self.triangleList
def buildListsNodes(self):
    """
    Renumber the nodes in sorted-key order, rebuild nodeList, and
    record the old-to-new numbering in oldToNewNode.
    """
    self.nodeList = []
    self.oldToNewNode = list(range(len(self.nodeDict)))
    for newN, key in enumerate(sorted(self.nodeDict.keys())):
        node = self.nodeDict[key]
        self.oldToNewNode[node.N] = newN
        node.N = newN
        self.nodeList.append(node)
def buildListsEdges(self):
    """Renumber the edges in sorted-key order and rebuild edgeList."""
    self.edgeList = []
    for newN, key in enumerate(sorted(self.edgeDict.keys())):
        edge = self.edgeDict[key]
        edge.N = newN
        self.edgeList.append(edge)
def buildListsTriangles(self):
    """
    Renumber the triangles in sorted-key order, rebuild triangleList,
    and alias it as polygonList.
    """
    self.triangleList = []
    for newN, key in enumerate(sorted(self.triangleDict.keys())):
        tri = self.triangleDict[key]
        tri.N = newN
        self.triangleList.append(tri)
    # 3D boundary entities are polygons (triangles)
    self.polygonList = self.triangleList
def buildListsTetrahedra(self):
    """
    Renumber the tetrahedra in sorted-key order, rebuild
    tetrahedronList, and alias it as polyhedronList.
    """
    self.tetrahedronList = []
    for newN, key in enumerate(sorted(self.tetrahedronDict.keys())):
        tet = self.tetrahedronDict[key]
        tet.N = newN
        self.tetrahedronList.append(tet)
    # 3D elements are polyhedra (tetrahedra)
    self.polyhedronList = self.tetrahedronList
def buildBoundaryMaps(self):
    """
    Extract a mapping tn -> list((TN,tnLocal)) that
    provides all elements with the boundary face (triangle) tn
    and the local triangle number for that triangle.
    Likewise build mappings for edges and nodes
    Also extract a list of the triangles with only one associate
    element; these are the external boundary triangles. Then extract
    the edges and nodes from the boundary triangles.
    """
    # one (element, local-index) list per triangle/edge/node
    self.triangleMap=[[] for t in self.triangleList]
    self.edgeMap=[[] for e in self.edgeList]
    self.nodeMap=[[] for n in self.nodeList]
    self.boundaryTriangles=set()
    self.interiorTriangles=set()
    self.boundaryEdges=set()
    self.boundaryNodes=set()
    self.interiorEdges=set()
    self.interiorNodes=set()
    logEvent("Building triangle,edge, and node maps")
    for T in self.tetrahedronList:
        for localTriangleNumber,t in enumerate(T.triangles):
            self.triangleMap[t.N].append((T.N,localTriangleNumber))
        for localEdgeNumber,e in enumerate(T.edges):
            self.edgeMap[e.N].append((T.N,localEdgeNumber))
        for localNodeNumber,n in enumerate(T.nodes):
            self.nodeMap[n.N].append((T.N,localNodeNumber))
    logEvent("Extracting boundary and interior triangles")
    # a triangle referenced by exactly one tetrahedron lies on the boundary
    for tN,etList in enumerate(self.triangleMap):
        if len(etList) == 1:
            self.boundaryTriangles.add(self.triangleList[tN])
        else:
            self.interiorTriangles.add(self.triangleList[tN])
    logEvent("Extracting boundary edges and nodes")
    for t in self.boundaryTriangles:
        self.boundaryEdges.update(t.edges)
        self.boundaryNodes.update(t.nodes)
    logEvent("Extracting interior edges and nodes")
    # NOTE(review): interiorEdges/interiorNodes are taken from interior
    # triangles and may also include entities that touch the boundary.
    for t in self.interiorTriangles:
        self.interiorEdges.update(t.edges)
        self.interiorNodes.update(t.nodes)
    self.boundaryMesh.buildFromSets(self.boundaryTriangles,
                                    self.boundaryEdges,self.boundaryNodes)
def newTetrahedron(self, nodes):
    """
    Create a Tetrahedron on *nodes*, register it (and its faces and
    their edges) in the mesh dicts, and return it.
    """
    newTet = Tetrahedron(tetrahedronNumber=len(self.tetrahedronDict),
                         nodes=nodes)
    self.tetrahedronDict[newTet.nodes] = newTet
    # register the faces, which in turn registers their edges
    self.registerTriangles(newTet)
    return newTet
def registerEdges(self, t):
    """
    Deduplicate t's edges against edgeDict: reuse the canonical edge
    where one exists, otherwise number the new edge and register it.
    """
    for i, edge in enumerate(t.edges):
        existing = self.edgeDict.get(edge.nodes)
        if existing is not None:
            # canonical edge already registered; share it
            t.edges[i] = existing
        else:
            edge.N = len(self.edgeDict)
            self.edgeDict[edge.nodes] = edge
def registerTriangles(self, T):
    """
    Deduplicate T's faces against triangleDict: reuse the canonical
    triangle where one exists; otherwise number it, register it, and
    register its edges.
    """
    for i, tri in enumerate(T.triangles):
        existing = self.triangleDict.get(tri.nodes)
        if existing is not None:
            # canonical face already registered; share it
            T.triangles[i] = existing
        else:
            tri.N = len(self.triangleDict)
            self.triangleDict[tri.nodes] = tri
            self.registerEdges(tri)
def registerNode(self, node):
    """
    Return the canonical node equal to *node*, registering *node* as
    the canonical one (with the next node number) if it is new.
    """
    try:
        return self.nodeDict[node]
    except KeyError:
        node.N = len(self.nodeDict)
        self.nodeDict[node] = node
        return node
def readMeshADH(self, filename, adhBase=1):
    """
    Read an ADH '*.3dm' mesh file and populate the node, edge,
    triangle, and tetrahedron lists and dicts.

    Parameters
    ----------
    filename : path of the mesh file without the '.3dm' extension.
    adhBase : base of the ADH numbering (default 1); node numbers are
        shifted to 0-based internally.

    Assumes 'E4T' element lines come first (ordered by element number)
    followed by 5-column node lines (ordered by node number).
    """
    # the file handle was previously never closed; use a context manager
    with open(filename + '.3dm', 'r') as meshIn:
        firstLine = meshIn.readline()
        firstWords = firstLine.split()
        logEvent("Reading object=%s from file=%s" % (firstWords[0], filename))
        line = meshIn.readline()
        columns = line.split()
        tets = []
        tetEdges = set()
        tetTriangles = set()
        logEvent("Reading "+str(filename)+" and building node lists for tetrahedra,triangles, and edges")
        # assume tets are ordered by tet number
        while (columns[0] == 'E4T'):
            nodeNumbers = [int(c) - adhBase for c in columns[2:6]]
            nodeNumbers.sort()
            tets.append(array.array('i', nodeNumbers))
            # the four faces of the tetrahedron
            tetTriangles.update([(nodeNumbers[1], nodeNumbers[2], nodeNumbers[3]),
                                 (nodeNumbers[0], nodeNumbers[2], nodeNumbers[3]),
                                 (nodeNumbers[0], nodeNumbers[1], nodeNumbers[3]),
                                 (nodeNumbers[0], nodeNumbers[1], nodeNumbers[2])])
            # the six edges of the tetrahedron
            tetEdges.update([(nodeNumbers[0], nodeNumbers[1]),
                             (nodeNumbers[0], nodeNumbers[2]),
                             (nodeNumbers[0], nodeNumbers[3]),
                             (nodeNumbers[1], nodeNumbers[2]),
                             (nodeNumbers[1], nodeNumbers[3]),
                             (nodeNumbers[2], nodeNumbers[3])])
            line = meshIn.readline()
            columns = line.split()
        print("Building node list and dict")
        # assume nodes are ordered by node number
        while (len(columns) == 5):
            newNode = Node(int(columns[1]) - adhBase,
                           float(columns[2]),
                           float(columns[3]),
                           float(columns[4]))
            self.nodeList.append(newNode)
            self.nodeDict[newNode] = newNode
            line = meshIn.readline()
            columns = line.split()
    print("Number of tetrahedra:"+str(len(tets)))
    print("Number of triangles :"+str(len(tetTriangles)))
    print("Number of edges     :"+str(len(tetEdges)))
    print("Number of nodes     :"+str(len(self.nodeList)))
    print("Number of objects   :"+str(len(tetEdges)+len(tetTriangles)+len(tets)+len(self.nodeList)))
    print("Building edge list")
    self.edgeList = [Edge(edgeNumber=eN, nodes=[self.nodeList[nN[0]], self.nodeList[nN[1]]])
                     for eN, nN in enumerate(tetEdges)]
    print("Building edge dict")
    self.edgeDict = dict([(e.nodes, e) for e in self.edgeList])
    print("Building triangle list")
    self.triangleList = [Triangle(triangleNumber=tN,
                                  nodes=[self.nodeList[nN[0]], self.nodeList[nN[1]], self.nodeList[nN[2]]],
                                  edgeDict=self.edgeDict)
                         for tN, nN in enumerate(tetTriangles)]
    print("Building triangle dict")
    self.triangleDict = dict([(t.nodes, t) for t in self.triangleList])
    print("Building tetredron list")
    self.tetrahedronList = [Tetrahedron(tetrahedronNumber=TN,
                                        nodes=[self.nodeList[nN[0]], self.nodeList[nN[1]], self.nodeList[nN[2]], self.nodeList[nN[3]]],
                                        edgeDict=self.edgeDict,
                                        triangleDict=self.triangleDict)
                            for TN, nN in enumerate(tets)]
    self.elementList = self.tetrahedronList
    self.elementBoundaryList = self.triangleList
    print("Building tetrahedron dict")
    self.tetrahedronDict = dict([(T.nodes, T) for T in self.tetrahedronList])
    print("Building boundary maps")
    self.buildBoundaryMaps()
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0, EB=False):
    """Delegate to the base Mesh XDMF writer with element type 'Tetrahedron'."""
    #print "Warning mwf hack for EB printing for tet writeMeshXdmf for now"
    #EB = True
    Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Tetrahedron",tCount,EB=EB)
def writeMeshEnsight(self,filename,description=None):
    """Write the mesh in EnSight Gold format.

    Creates two files: `filename`.case (the EnSight case file) and
    `filename`.geo (node coordinates and tetra4 connectivity).
    Node and element ids are written 1-based.

    Parameters
    ----------
    filename : str
        Base name (without extension) for the output files.
    description : str, optional
        Part description written into the geometry file; defaults
        to 'A Mesh'.
    """
    base = 1
    # fix: use 'with' so the handles are closed even if a write fails
    # (the original leaked both files on error)
    with open(filename+'.case','w') as caseOut:
        caseOut.write('FORMAT\n'+'type: ensight gold\n')
        caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
    with open(filename+'.geo','w') as meshOut:
        meshOut.write('Ensight Gold\n')
        meshOut.write('Unstructured Tetrahedral Mesh\n')
        meshOut.write('node id given\n')
        meshOut.write('element id given\n')
        meshOut.write('part \n'+'%10i\n' % 1)
        if description:
            meshOut.write(description+'\n')
        else:
            meshOut.write('A Mesh\n')
        # coordinates: node ids first, then x, y and z in separate sweeps
        meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
        for nN in range(self.nNodes_global):
            meshOut.write('%10i\n' % (nN+base))
        for coord in range(3):
            for nN in range(self.nNodes_global):
                meshOut.write('%12.5E\n' % self.nodeArray[nN,coord])
        # connectivity: element ids first, then four 1-based node numbers per tet
        meshOut.write('tetra4\n'+'%10i\n' % self.nElements_global)
        for eN in range(self.nElements_global):
            meshOut.write('%10i\n' % (eN+base))
        for eN in range(self.nElements_global):
            meshOut.write('%10i%10i%10i%10i\n' % tuple((nN+base) for nN in self.elementNodesArray[eN,:]))
def appendMeshEnsight(self,filename,description=None):
    """Append this mesh's geometry to an existing EnSight case file.

    Adds a GEOMETRY entry to `filename`.case (opened in append mode)
    and (re)writes `filename`.geo with coordinates and tetra4
    connectivity taken from the node/element object lists.
    """
    base = 1
    # fix: use 'with' so the handles are closed even if a write fails
    with open(filename+'.case','a') as caseOut:
        caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
    with open(filename+'.geo','w') as meshOut:
        meshOut.write('Unstructured Tetrahedral Mesh\n\n')
        meshOut.write('node id given\n')
        meshOut.write('element id given\n')
        meshOut.write('part \n'+'%10i\n' % 1)
        if description:
            meshOut.write(description+'\n')
        else:
            # bug fix: the description line must be newline-terminated
            # (writeMeshEnsight writes 'A Mesh\n'); without it the
            # following 'coordinates' keyword was fused onto this line.
            meshOut.write('A Mesh\n')
        meshOut.write('coordinates\n'+'%10i\n' % len(self.nodeList))
        for n in self.nodeList:
            meshOut.write('%10i\n' % (n.N+base))
        for n in self.nodeList:
            meshOut.write('%12.5E\n' % n.p[X])
        for n in self.nodeList:
            meshOut.write('%12.5E\n' % n.p[Y])
        for n in self.nodeList:
            meshOut.write('%12.5E\n' % n.p[Z])
        meshOut.write('tetra4\n'+'%10i\n' % len(self.elementList))
        for e in self.elementList:
            meshOut.write('%10i\n' % (e.N + base))
        for e in self.elementList:
            meshOut.write('%10i%10i%10i%10i\n' % tuple(n.N+base for n in e.nodes))
def writeMeshADH(self,filename,adhBase=1):
    """Write the mesh in ADH (3dm) format via the C-level mesh tools.

    `adhBase` is the numbering base used in the output files.
    """
    from . import cmeshTools
    cmeshTools.write3dmFiles(self.cmesh,filename,adhBase)
def writeBoundaryFacesADH(self,filename,adhBase=1):
    """Write boundary face records ('FCS' lines) in ADH format.

    For each boundary triangle, the owning tetrahedron's number and the
    local face number are written, offset by `adhBase`.
    """
    boundaryFacesOut=open(filename,'w')
    for t in self.boundaryTriangles:
        # triangleMap[t.N][0] holds (tet number, local face number) of the owner
        TN = self.triangleMap[t.N][0][0]
        T = self.tetrahedronList[TN]
        localFaceNumber = self.triangleMap[t.N][0][1]
        T.computeGeometricInfo()
        DJ = edet(T.linearMap)
        if DJ < 0:
            # negative Jacobian: swap nodes 2 and 3 to restore positive orientation
            newNodes = list(T.nodes)
            newNodes[3] = T.nodes[2]
            newNodes[2] = T.nodes[3]
            newBasis = [n - newNodes[0] for n in newNodes[1:]]
            # NOTE(review): newMap is computed but never used beyond the
            # (commented-out) determinant check -- confirm it can be dropped.
            newMap = ETen(newBasis[0],newBasis[1],newBasis[2])
            # NOTE(review): localFaceNumber (an index) is compared against
            # node objects here; this looks suspicious -- verify intent.
            if localFaceNumber == T.nodes[2]:
                localFaceNumber = T.nodes[3]
            elif localFaceNumber == T.nodes[3]:
                localFaceNumber = T.nodes[2]
        line = 'FCS %5i %5i %5i' % \
            (T.N + adhBase,
             localFaceNumber + adhBase,
             1)
        boundaryFacesOut.write(line+'\n')
    boundaryFacesOut.close()
def writeBoundaryNodesADH(self,filename,adhBase=1):
    """Write boundary node records ('NDS' lines) in ADH format.

    Each boundary node is written as 'NDS <node number> 1' with the
    node number offset by `adhBase` (ADH files are 1-based by default).
    """
    # fix: 'with' closes the file even if a write fails (the original
    # leaked the handle on error)
    with open(filename,'w') as boundaryNodesOut:
        for n in self.boundaryNodes:
            boundaryNodesOut.write('NDS %5i %5i\n' % (n.N + adhBase, 1))
def refine4T(self,oldMesh):
    """Refine each tetrahedron of `oldMesh` into four children.

    Every parent tet is split by connecting its barycenter to each of
    its four faces.  Returns a dict mapping the parent tet number to
    the list of its four child tetrahedra.
    """
    childrenDict = {}
    for parent in oldMesh.tetrahedronList:
        # copy the parent's corner nodes so they can be renumbered when
        # registered with this (new) mesh
        corners = [Node(i, node.p[X], node.p[Y], node.p[Z])
                   for i, node in enumerate(parent.nodes)]
        corners = [self.registerNode(c) for c in corners]
        # the interior point of the 4-way split is the parent barycenter
        parent.computeGeometricInfo()
        center = self.registerNode(Node(len(self.nodeDict),
                                        parent.barycenter[X],
                                        parent.barycenter[Y],
                                        parent.barycenter[Z]))
        childrenDict[parent.N] = [
            self.newTetrahedron([corners[0], corners[1], corners[2], center]),
            self.newTetrahedron([corners[1], corners[2], corners[3], center]),
            self.newTetrahedron([corners[2], corners[3], corners[0], center]),
            self.newTetrahedron([corners[3], corners[0], corners[1], center]),
        ]
    self.finalize()
    return childrenDict
def refineFreudenthalBey(self,oldMesh):
    """Refine every tetrahedron of `oldMesh` into eight children
    (Freudenthal-Bey octasection).

    Each parent yields four corner tets (one per vertex, using the edge
    midpoints adjacent to that vertex) and four tets filling the
    interior octahedron.  The octahedron is cut along its shortest
    diagonal to keep the children well-shaped.

    Returns a dict mapping the parent tet number to its 8 children.
    """
    logEvent("Refining the mesh using Freudenthal-Bey refinement")
    childrenDict={}
    for T in list(oldMesh.tetrahedronDict.values()):
        #deep copy old nodes because we'll renumber
        TNodes = [Node(nN,n.p[X],n.p[Y],n.p[Z]) for nN,n in \
                  enumerate(T.nodes)]
        for lnN,n in enumerate(TNodes): TNodes[lnN]=self.registerNode(n)
        #add new nodes (midpoints of edges)
        #use local edge tuples as keys
        newNodes={}
        for et,en in T.edgeMap.items():
            T.edges[en].computeGeometricInfo()
            p = T.edges[en].barycenter
            newNodes[et] = Node(en,p[X],p[Y],p[Z])
        #set the global node numbers
        for k,n in newNodes.items(): newNodes[k]=self.registerNode(n)
        #add corner tets (one per original vertex)
        T1=self.newTetrahedron([TNodes[0],
                                newNodes[(0,1)],
                                newNodes[(0,2)],
                                newNodes[(0,3)]])
        T2=self.newTetrahedron([TNodes[1],
                                newNodes[(0,1)],
                                newNodes[(1,2)],
                                newNodes[(1,3)]])
        T3=self.newTetrahedron([TNodes[2],
                                newNodes[(0,2)],
                                newNodes[(1,2)],
                                newNodes[(2,3)]])
        T4=self.newTetrahedron([TNodes[3],
                                newNodes[(0,3)],
                                newNodes[(1,3)],
                                newNodes[(2,3)]])
        #add center tets
        #choose the shortest diagonal of the octahedron
        dLengths = [enorm(newNodes[(0,1)].p-newNodes[(2,3)].p),
                    enorm(newNodes[(0,2)].p-newNodes[(1,3)].p),
                    enorm(newNodes[(0,3)].p-newNodes[(1,2)].p)]
        shortestEdgeLength = min(dLengths)
        if shortestEdgeLength == dLengths[0]:
            #diagonal (0,1)(2,3)
            T5=self.newTetrahedron([newNodes[(0,1)],
                                    newNodes[(2,3)],
                                    newNodes[(0,3)],
                                    newNodes[(1,3)]])
            T6=self.newTetrahedron([newNodes[(0,1)],
                                    newNodes[(2,3)],
                                    newNodes[(0,3)],
                                    newNodes[(0,2)]])
            T7=self.newTetrahedron([newNodes[(0,1)],
                                    newNodes[(2,3)],
                                    newNodes[(0,2)],
                                    newNodes[(1,2)]])
            T8=self.newTetrahedron([newNodes[(0,1)],
                                    newNodes[(2,3)],
                                    newNodes[(1,2)],
                                    newNodes[(1,3)]])
        elif shortestEdgeLength == dLengths[1]:
            #diagonal (0,2)(1,3)
            T5=self.newTetrahedron([newNodes[(0,2)],
                                    newNodes[(1,3)],
                                    newNodes[(0,3)],
                                    newNodes[(2,3)]])
            T6=self.newTetrahedron([newNodes[(0,2)],
                                    newNodes[(1,3)],
                                    newNodes[(2,3)],
                                    newNodes[(1,2)]])
            T7=self.newTetrahedron([newNodes[(0,2)],
                                    newNodes[(1,3)],
                                    newNodes[(1,2)],
                                    newNodes[(0,1)]])
            T8=self.newTetrahedron([newNodes[(0,2)],
                                    newNodes[(1,3)],
                                    newNodes[(0,1)],
                                    newNodes[(0,3)]])
        else:
            #diagonal (0,3)(1,2)
            T5=self.newTetrahedron([newNodes[(0,3)],
                                    newNodes[(1,2)],
                                    newNodes[(0,1)],
                                    newNodes[(1,3)]])
            T6=self.newTetrahedron([newNodes[(0,3)],
                                    newNodes[(1,2)],
                                    newNodes[(1,3)],
                                    newNodes[(2,3)]])
            T7=self.newTetrahedron([newNodes[(0,3)],
                                    newNodes[(1,2)],
                                    newNodes[(2,3)],
                                    newNodes[(0,2)]])
            T8=self.newTetrahedron([newNodes[(0,3)],
                                    newNodes[(1,2)],
                                    newNodes[(0,2)],
                                    newNodes[(0,1)]])
        childrenDict[T.N]=[T1,T2,T3,T4,T5,T6,T7,T8]
    self.finalize()
    return childrenDict
    # debugging snippet kept from the original (unreachable):
    #for debugging: print each tet
    #self.edgeList=[]
    #Tlist = self.tetrahedronDict.values()
    #for T in Tlist:
    #    self.edgeList = self.edgeList + T.edges
def refine(self,oldMesh):
    """Refine `oldMesh` into this mesh using the default strategy,
    i.e. Freudenthal-Bey octasection."""
    return self.refineFreudenthalBey(oldMesh)
def generateFromTetgenFiles(self,filebase,base,skipGeometricInit=False,parallel=False):
    """Build the mesh from TetGen output files.

    Parameters
    ----------
    filebase : str
        Base name of the TetGen files.
    base : int
        Numbering base used in the TetGen files (0 or 1).
    skipGeometricInit : bool
        When False (default), allocate and compute geometric info on
        the C-level mesh before mirroring it into this object.
    parallel : bool
        Use the parallel reader when True.
    """
    from . import cmeshTools
    logEvent(memory("declaring CMesh"),level=4)
    self.cmesh = cmeshTools.CMesh()
    logEvent(memory("Initializing CMesh"),level=4)
    if parallel:
        cmeshTools.generateFromTetgenFilesParallel(self.cmesh,filebase,base)
    else:
        cmeshTools.generateFromTetgenFiles(self.cmesh,filebase,base)
    logEvent(memory("calling cmeshTools.generateFromTetgenFiles","cmeshTools"),level=4)
    # idiom fix: test booleans with 'not', never '== False'
    if not skipGeometricInit:
        cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
        cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
    self.buildFromC(self.cmesh)
    logEvent(memory("calling buildFromC"),level=4)
def generateFrom3DMFile(self,filebase,base=1):
    """Build the mesh from an ADH 3dm file via the C-level mesh tools.

    Reads `filebase` (numbering base `base`), computes geometric info
    on the C mesh, then mirrors it into this Python-level mesh.
    """
    from . import cmeshTools
    self.cmesh = cmeshTools.CMesh()
    cmeshTools.generateFrom3DMFile(self.cmesh,filebase,base)
    cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
    cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
    self.buildFromC(self.cmesh)
def writeTetgenFiles(self,filebase,base):
    """Write this mesh out as TetGen files (numbering base `base`)."""
    from . import cmeshTools
    cmeshTools.writeTetgenFiles(self.cmesh,filebase,base)
def meshInfo(self):
    """Return a human-readable summary of mesh sizes and quality.

    When this mesh has a distinct subdomain mesh, the report contains
    both the global and the local (subdomain) statistics.
    """
    summary = """Number of tetrahedra : %d
Number of triangles : %d
Number of edges : %d
Number of nodes : %d
max(sigma_k) : %f
min(h_k) : %f\n""" % (self.nElements_global,
                      self.nElementBoundaries_global,
                      self.nEdges_global,
                      self.nNodes_global,
                      self.sigmaMax,
                      self.hMin)
    if self.subdomainMesh != self:
        # prepend the global stats, then append the subdomain's own report
        return ("*** Global ***\n" + summary
                + "\n*** Local ***\n" + self.subdomainMesh.meshInfo())
    return summary
class HexahedralMesh(Mesh):
"""A mesh of hexahedra.
"""
def __init__(self):
    """Initialize an empty hexahedral mesh.

    Sets up node/edge/face/element containers and an (initially empty)
    quadrilateral boundary mesh.
    """
    Mesh.__init__(self)
    # lookup tables keyed by entity, plus ordered lists of the same
    self.nodeDict = {}
    self.edgeDict = {}
    self.faceDict = {}
    self.elemDict = {}
    self.faceList = []
    self.elemList = []
    # mapping used when nodes are renumbered
    self.oldToNewNode = []
    self.boundaryMesh = QuadrilateralMesh()
def meshType(self):
    """Identify this mesh's element family for generic consumers."""
    return 'cuboid'
def computeGeometricInfo(self):
    """Placeholder: geometric info is not yet implemented for hexahedra."""
    from . import cmeshTools
    print("no info yet for hexahedral mesh")
    #cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
def generateHexahedralMeshFromRectangularGrid(self,nx,ny,nz,Lx,Ly,Lz):
    """Generate an nx x ny x nz hexahedral grid on [0,Lx]x[0,Ly]x[0,Lz].

    The mesh is built on the C side and then mirrored into this object.
    """
    from . import cmeshTools
    self.cmesh = cmeshTools.CMesh()
    cmeshTools.generateHexahedralMeshFromRectangularGrid(nx,ny,nz,0,0,0,Lx,Ly,Lz,self.cmesh)
    cmeshTools.allocateGeometricInfo_hexahedron(self.cmesh)
    cmeshTools.computeGeometricInfo_hexahedron(self.cmesh)
    self.buildFromC(self.cmesh)
def finalize(self):
self.buildLists()
#self.fixLocalNumbering()
self.buildBoundaryMaps()
self.buildArraysFromLists()
self.hMax = 0.0
self.hMin = 1.0e16
self.sigmaMax | |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin which tells Pylint how to handle classes which define attributes using jsonschema
in "schema" class attribute.
Those classes dynamically assign attributes defined in the schema on the class inside the
constructor.
Pylint uses static analysis on the python files it lints. This means that it does not
import any of the code using standard python libraries. Instead, it parses them into an
AST using the astroid library. Thus, it is safe to run Pylint on code that would
have import time side-effects without triggering those effects. When parsing a single file,
Pylint can parse the direct dependencies of that file without following the entire
import chain, which might include significant transitive dependencies in 3rd party libraries.
Since pylint is using an AST instead of importing the code, it cannot know about the dynamic
attributes that get added to our API model classes. Plus, the schema itself is often
constructed by including copies of common sub schemas. So, the attributes are dynamic AND
the schemas that define those attributes are also dynamic.
So, in this plugin we have to do a bit of work to:
1) extract the "schema =" assignment,
2) parse the assigned value to find any other variables used in the schema object,
3) extract the assignments for those other variables, and
4) construct a final dictionary AST node that includes all the attributes that
Pylint needs to know about on our model classes.
At this point, we have the schema, so then we:
5) iterate over the schema properties,
6) parse each property's value to find any other referenced variables,
7) extract the assignments for those referenced variables,
8) inspect the types of those properties (str, int, list, etc), and
9) add new attribute AST nodes (of the appropriate type) to the class AST node.
Now, we return because Pylint can finally understand our API model objects without
importing them.
"""
import astroid
from astroid import MANAGER
from astroid import nodes
from astroid import scoped_nodes
# Class names that look like API model classes (name ends in "API") but are
# not, so the schema-attribute transform must skip them.
CLASS_NAME_SKIPLIST = ["ExecutionSpecificationAPI"]
def register(linter):
    """Required pylint plugin entry point; no linter-level setup is needed."""
def infer_copy_deepcopy(call_node):
    """Resolve a ``copy.deepcopy(arg)`` call to the inferred value of ``arg``.

    Any node that is not exactly a ``copy.deepcopy(...)`` function call
    yields None.
    """
    is_deepcopy_call = (
        isinstance(call_node, nodes.Call)
        and call_node.func.as_string() == "copy.deepcopy"
    )
    if not is_deepcopy_call:
        return None
    # infer() yields possible values; take the first one
    return next(call_node.args[0].infer())
def predicate(cls: nodes.ClassDef) -> bool:
    """Astroid (used by pylint) calls this to decide whether the
    transform function should run on this class definition."""
    if cls.name in CLASS_NAME_SKIPLIST:
        # explicitly excluded despite its API-like name
        return False
    # treat the class as an API model when its name ends in "API" or it
    # defines a local "schema" attribute
    return cls.name.endswith("API") or "schema" in cls.locals
def transform(cls: nodes.ClassDef):
"""
Astroid (used by pylint) calls this function on each class definition it discovers.
cls is an Astroid AST representation of that class.
Our purpose here is to extract the schema dict from API model classes
so that we can inform pylint about all of the attributes on those models.
We do this by injecting attributes on the class for each property in the schema.
"""
# This is a class which defines attributes in "schema" variable using json schema.
# Those attributes are then assigned during run time inside the constructor
# Get the value node for the "schema =" assignment
schema_dict_node = next(cls.igetattr("schema"))
extra_schema_properties = {}
# If the "schema =" assignment's value node is not a simple type (like a dictionary),
# then pylint cannot infer exactly what it does. Most of the time, this is actually
# a function call to copy the schema from another class. So, let's find the dictionary.
if schema_dict_node is astroid.Uninferable:
# the assignment probably looks like this:
# schema = copy.deepcopy(ActionAPI.schema)
# so far we only have the value, but we need the actual assignment
assigns = [n for n in cls.get_children() if isinstance(n, nodes.Assign)]
schema_assign_name_node = cls.local_attr("schema")[0]
schema_assign_node = next(
assign for assign in assigns if assign.targets[0] == schema_assign_name_node
)
assigns.remove(schema_assign_node)
# We only care about "schema = copy.deepcopy(...)"
schema_dict_node = infer_copy_deepcopy(schema_assign_node.value)
if not schema_dict_node:
# This is not an API model class, as it doesn't have
# something we can resolve to a dictionary.
return
# OK, now we need to look for any properties that dynamically modify
# the dictionary that was just copied from somewhere else.
# See the note below for why we only care about "properties" here.
for assign_node in assigns:
# we're looking for assignments like this:
# schema["properties"]["ttl"] = {...}
target = assign_node.targets[0]
try:
if (
isinstance(target, nodes.Subscript)
and target.value.value.name == "schema"
and target.value.slice.value.value == "properties"
):
property_name_node = target.slice.value
else:
# not schema["properties"]
continue
except AttributeError:
continue
# schema["properties"]["execution"] = copy.deepcopy(ActionExecutionAPI.schema)
inferred_value = infer_copy_deepcopy(assign_node.value)
extra_schema_properties[property_name_node] = (
inferred_value if inferred_value else assign_node.value
)
if not isinstance(schema_dict_node, nodes.Dict):
# Not a class we are interested in (like BaseAPI)
return
# We only care about "properties" in the schema because that's the only part of the schema
# that gets translated into dynamic attributes on the model API class.
properties_dict_node = None
for key_node, value_node in schema_dict_node.items:
if key_node.value == "properties":
properties_dict_node = value_node
break
if not properties_dict_node and not extra_schema_properties:
# Not a class we can do anything with
return
# Hooray! We have the schema properties dict now, so we can start processing
# each property and add an attribute for each one to the API model class node.
for property_name_node, property_data_node in properties_dict_node.items + list(
extra_schema_properties.items()
):
property_name = property_name_node.value.replace(
"-", "_"
) # Note: We do the same in Python code
# Despite the processing above to extract the schema properties dictionary
# each property in the dictionary might also reference other variables,
# so we still need to resolve these to figure out each property's type.
# an indirect reference to copy.deepcopy() as in:
# REQUIRED_ATTR_SCHEMAS = {"action": copy.deepcopy(ActionAPI.schema)}
# schema = {"properties": {"action": REQUIRED_ATTR_SCHEMAS["action"]}}
if isinstance(property_data_node, nodes.Subscript):
var_name = property_data_node.value.name
subscript = property_data_node.slice.value.value
# lookup var by name (assume its at module level)
var_node = next(cls.root().igetattr(var_name))
# assume it is a dict at this point
data_node = None
for key_node, value_node in var_node.items:
if key_node.value == subscript:
# infer will resolve a Dict
data_node = next(value_node.infer())
if data_node is astroid.Uninferable:
data_node = infer_copy_deepcopy(value_node)
break
if data_node:
property_data_node = data_node
if not isinstance(property_data_node, nodes.Dict):
# if infer_copy_deepcopy already ran, we may need to resolve the dict
data_node = next(property_data_node.infer())
if data_node is not astroid.Uninferable:
property_data_node = data_node
property_type_node = None
if isinstance(property_data_node, nodes.Dict):
# We have a property schema, but we only care about the property's type.
for property_key_node, property_value_node in property_data_node.items:
if property_key_node.value == "type":
property_type_node = next(property_value_node.infer())
break
if property_type_node is None and isinstance(
property_data_node, nodes.Attribute
):
# reference schema from another file like this:
# from ... import TriggerAPI
# schema = {"properties": {"trigger": TriggerAPI.schema}}
# We only pull a schema from another file when it is an "object" (a dict).
# So, we do not need to do any difficult cross-file processing.
property_type = "object"
elif property_type_node is None:
property_type = None
elif isinstance(property_type_node, nodes.Const):
property_type = property_type_node.value
elif isinstance(property_type_node, (nodes.List, nodes.Tuple)):
# Hack for attributes with multiple types (e.g. string, null)
property_type = property_type_node.elts[
0
].value # elts has "elements" in the list/tuple
else:
# We should only hit this if someone has used a different approach
# for dynamically constructing | |
2, 'probable', 'SRC12'),
Source(13, 'Source 13', 3, 'contested', 'SRC13'),
Source(14, 'Source 14', 3, None, 'SRC14'),
Source(15, 'Source 15', 1, None, 'SRC15'),
Source(16, 'Source 16', 2, 'probable', 'SRC16'),
Source(17, 'Source 17', 3, 'certain', 'SRC17'),
Source(18, 'Source 18', 2, 'probable', 'SRC18'),
Source(19, 'Source 19', 2, 'probable', 'SRC19'),
Source(20, 'Source 20', 1, 'uncertain', 'SRC20'),
Source(21, 'Source 21', 2, 'probable', 'SRC21'),
Source(22, 'Source 22', 3, 'contested', 'SRC22'),
Source(23, 'Source 23', 3, None, 'SRC23'),
Source(24, 'Source 24', 2, 'false', 'SRC24'),
Source(25, 'Source 25', 2, 'contested', 'SRC25'),
Source(26, 'Source 26', 2, None, 'SRC26'),
Source(27, 'Source 27', 2, 'certain', 'SRC27'),
Source(28, 'Source 28', 1, 'contested', 'SRC28'),
Source(29, 'Source 29', 1, 'certain', 'SRC29'),
Source(30, 'Source 30', 3, 'contested', 'SRC30'),
Source(31, 'Source 31', 3, 'probable', 'SRC31'),
]
# }}}
# Test fixture: source_instance rows linking sources to evidence records
# (id, source_id, evidence_id, source_page, source_confidence, comment).
SourceInstance = namedtuple('source_instance', ['id', 'source_id', 'evidence_id', 'source_page', 'source_confidence', 'comment'])# {{{
source_instance_table = [
    SourceInstance( 1, 1, 1, '3934-3999', 'false', 'Comment 1'),
    SourceInstance( 2, 2, 2, '479', 'uncertain', 'Comment 2'),
    SourceInstance( 3, 3, 3, None, None, 'Comment 3'),
    SourceInstance( 4, 4, 4, '80', 'probable', None),
    SourceInstance( 5, 5, 5, '3334', 'false', 'Comment 5'),
    SourceInstance( 6, 6, 6, '3355', 'probable', 'Comment 6'),
    SourceInstance( 7, 7, 7, '2168', None, 'Comment 7'),
    SourceInstance( 8, 8, 8, '1383', 'contested', None),
    SourceInstance( 9, 9, 9, '56', None, 'Comment 9'),
    SourceInstance(10, 10, 10, '1202', None, 'Comment 10'),
    SourceInstance(11, 11, 11, '1878', 'probable', 'Comment 11'),
    SourceInstance(12, 12, 12, '1515', 'certain', 'Comment 12'),
    SourceInstance(13, 13, 13, '3174', 'uncertain', 'Comment 13'),
    SourceInstance(14, 14, 14, '509', None, None),
    SourceInstance(15, 15, 15, '2126', 'false', 'Comment 15'),
    SourceInstance(16, 16, 16, None, None, 'Comment 16'),
    SourceInstance(17, 17, 17, '1865', 'probable', 'Comment 17'),
    SourceInstance(18, 18, 18, '2730', 'probable', 'Comment 18'),
    SourceInstance(19, 19, 19, '2757', 'probable', 'Comment 19'),
    SourceInstance(20, 20, 20, '499', 'certain', None),
    SourceInstance(21, 21, 1, None, 'probable', None),
    SourceInstance(22, 22, 1, '1209', 'false', 'Comment 22'),
    SourceInstance(23, 23, 1, '1488', 'certain', 'Comment 23'),
    SourceInstance(24, 24, 2, '3335', 'false', 'Comment 24'),
    SourceInstance(25, 25, 2, '2204', 'false', 'Comment 25'),
    SourceInstance(26, 26, 2, '400', None, None),
    SourceInstance(27, 27, 2, '522', 'probable', 'Comment 27'),
    SourceInstance(28, 28, 2, None, 'uncertain', 'Comment 28'),
    SourceInstance(29, 29, 3, '2573', 'certain', None),
    SourceInstance(30, 1, 3, '3191', 'contested', 'Comment 30'),
    SourceInstance(31, 1, 3, None, None, None),
    SourceInstance(32, 2, 3, '1885', 'false', 'Comment 32'),
    SourceInstance(33, 3, 3, None, 'uncertain', 'Comment 33'),
    SourceInstance(34, 4, 3, None, 'uncertain', 'Comment 34'),
]
# }}}
# Test fixture: tag definitions (id, tagname, comment).
Tag = namedtuple('tag', ['id', 'tagname', 'comment'])# {{{
tag_table = [
    Tag(1, 'tag1', 'test comment'),
    Tag(2, 'tag2', 'test comment 2'),
    Tag(3, 'tag3', 'test comment 3'),
    Tag(4, 'tag4', 'test comment 4'),
    Tag(5, 'tag5', None),
    Tag(6, 'tag6', 'lorem'),
    Tag(7, 'tag7', 'ipsum'),
    Tag(8, 'tag8', None),
    Tag(9, 'tag9', None),
    Tag(10, 'tag10', 'test'),
]
# }}}
# Test fixture: many-to-many links between tags and evidence records
# (id, tag_id, evidence_id).
TagEvidence = namedtuple('tag_evidence', ['id', 'tag_id', 'evidence_id'])# {{{
tag_evidence_table = [
    TagEvidence(1, 1, 1),
    TagEvidence(2, 2, 1),
    TagEvidence(3, 4, 2),
    TagEvidence(4, 7, 2),
    TagEvidence(5, 8, 2),
    TagEvidence(6, 1, 3),
    TagEvidence(7, 2, 5),
    TagEvidence(8, 4, 6),
    TagEvidence(9, 5, 7),
    TagEvidence(10, 5, 8),
    TagEvidence(11, 6, 9),
    TagEvidence(12, 8, 9),
    TagEvidence(13, 2, 10),
    TagEvidence(14, 3, 10),
    TagEvidence(15, 4, 10),
    TagEvidence(16, 1, 13),
    TagEvidence(17, 1, 17),
    TagEvidence(18, 7, 17),
    TagEvidence(19, 2, 18),
    TagEvidence(20, 1, 19),
    TagEvidence(21, 2, 19),
    TagEvidence(22, 8, 19),
    TagEvidence(23, 9, 19),
    TagEvidence(24, 10, 19),
    TagEvidence(25, 1, 20),
    TagEvidence(26, 2, 20),
    TagEvidence(27, 2, 22),
    TagEvidence(28, 3, 22),
    TagEvidence(29, 3, 23),
    TagEvidence(30, 4, 23),
    TagEvidence(31, 5, 24),
    TagEvidence(32, 7, 24),
]
# }}}
UserAction = namedtuple('user_action', ['id', 'evidence_id', 'action_type_id', 'user_id', 'timestamp', 'description', 'old_value', 'comment'])# {{{
user_action_table = [
UserAction(1, 1, 1, 4, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 1', None, None),
UserAction(2, 2, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 2', None, None),
UserAction(3, 3, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 3', None, 'comment'),
UserAction(4, 4, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 4', None, None),
UserAction(5, 5, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 5', None, None),
UserAction(6, 6, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 6', None, None),
UserAction(7, 7, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 7', None, 'comment'),
UserAction(8, 8, 1, 4, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 8', None, None),
UserAction(9, 9, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 9', None, None),
UserAction(10, 10, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 10', None, None),
UserAction(11, 11, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 11', None, 'comment'),
UserAction(12, 12, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 12', None, None),
UserAction(13, 13, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 13', None, 'comment'),
UserAction(14, 14, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 14', None, None),
UserAction(15, 15, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 15', None, None),
UserAction(16, 16, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 16', None, 'comment'),
UserAction(17, 17, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 17', None, 'comment'),
UserAction(18, 18, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 18', None, None),
UserAction(19, 19, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 19', None, 'comment'),
UserAction(20, 20, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 20', None, None),
UserAction(21, 21, 1, 4, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 21', None, None),
UserAction(22, 22, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 22', None, 'comment'),
UserAction(23, 23, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 23', None, 'comment'),
UserAction(24, 24, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 24', None, None),
UserAction(25, 25, 1, 3, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 25', None, None),
UserAction(26, 26, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 26', None, 'comment'),
UserAction(27, 27, 1, 2, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 27', None, 'comment'),
UserAction(28, 28, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 28', None, None),
UserAction(29, 29, 1, 1, datetime.datetime(2020, 4, 20, 14, 0, 0), 'Create evidence 29', None, None),
UserAction(30, 7, 4, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=9), 'Modify ', {"foo": 1}, 'comment'),
UserAction(31, 5, 2, 2, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=45), 'Modify ', {"a": "b"}, 'comment'),
UserAction(32, 15, 4, 3, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=227), 'Modify ', {"x": True, "y": 12}, 'comment'),
UserAction(33, 28, 3, 2, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=380), 'Modify ', {"val": 12}, None),
UserAction(34, 4, 2, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=523), 'Modify ', {"val": 13}, 'comment'),
UserAction(35, 12, 3, 2, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=702), 'Modify ', {"val": 14}, None),
UserAction(36, 11, 4, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=814), 'Modify ', {"val": 15}, 'comment'),
UserAction(37, 17, 2, 2, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=911), 'Modify ', {"val": 16}, 'comment'),
UserAction(38, 23, 2, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1027), 'Modify ', {"val": 17}, None),
UserAction(39, 15, 2, 2, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1250), 'Modify ', {"val": 18}, 'comment'),
UserAction(40, 24, 2, 3, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1331), 'Modify ', {"val": 19}, None),
UserAction(41, 2, 4, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1517), 'Modify ', {"val": 20}, None),
UserAction(42, 12, 4, 1, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1522), 'Modify ', {"val": 21}, None),
UserAction(43, 11, 2, 1, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1696), 'Modify ', {"val": 22}, 'comment'),
UserAction(44, 5, 2, 3, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1803), 'Modify ', {"val": 23}, None),
UserAction(45, 25, 3, 3, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1850), 'Modify ', {"val": 24}, 'comment'),
UserAction(46, 5, 3, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1958), 'Modify ', {"val": 25}, 'comment'),
UserAction(47, 4, 4, 4, datetime.datetime(2020, 4, 20, 14, 0, 0) + datetime.timedelta(minutes=1987), 'Modify ', {}, None),
UserAction(48, | |
<filename>src/pymor/core/cache.py
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""This module provides the caching facilities of pyMOR.
Any class that wishes to provide cached method calls should derive from
:class:`CacheableObject`. Methods which are to be cached can then
be marked using the :class:`cached` decorator.
To ensure consistency, :class:`CacheableObject` derives from
|ImmutableObject|: The return value of a cached method call should
only depend on its arguments as well as the immutable state of the class
instance.
Making this assumption, the keys for cache lookup are created from
the following data:
1. the instance's :attr:`~CacheableObject.cache_id` in case of a
:attr:`~CacheRegion.persistent` :class:`CacheRegion`, else the instance's
:attr:`~pymor.core.base.BasicObject.uid`,
2. the method's `__name__`,
3. the method's arguments.
Note that instances of |ImmutableObject| are allowed to have mutable
private attributes. It is the implementor's responsibility not to break things.
(See this :ref:`warning <ImmutableObjectWarning>`.)
Backends for storage of cached return values derive from :class:`CacheRegion`.
Currently two backends are provided for memory-based and disk-based caching
(:class:`MemoryRegion` and :class:`DiskRegion`). The available regions
are stored in the module level `cache_regions` dict. The user can add
additional regions (e.g. multiple disk cache regions) as required.
:attr:`CacheableObject.cache_region` specifies a key of the `cache_regions` dict
to select a cache region which should be used by the instance.
(Setting :attr:`~CacheableObject.cache_region` to `None` or `'none'` disables caching.)
By default, a 'memory', a 'disk' and a 'persistent' cache region are configured. The
paths and maximum sizes of the disk regions, as well as the maximum number of keys of
the memory cache region can be configured via the
`pymor.core.cache.default_regions.disk_path`,
`pymor.core.cache.default_regions.disk_max_size`,
`pymor.core.cache.default_regions.persistent_path`,
`pymor.core.cache.default_regions.persistent_max_size` and
`pymor.core.cache.default_regions.memory_max_keys` |defaults|.
There are two ways to disable and enable caching in pyMOR:
1. Calling :func:`disable_caching` (:func:`enable_caching`), to disable
(enable) caching globally.
2. Calling :meth:`CacheableObject.disable_caching`
(:meth:`CacheableObject.enable_caching`) to disable (enable) caching
for a given instance.
Caching of a method is only active if caching has been enabled both globally
(enabled by default) and on instance level. For debugging purposes, it is moreover
possible to set the environment variable `PYMOR_CACHE_DISABLE=1` which overrides
any call to :func:`enable_caching`.
A cache region can be emptied using :meth:`CacheRegion.clear`. The function
:func:`clear_caches` clears each cache region registered in `cache_regions`.
"""
import atexit
from collections import OrderedDict
from copy import deepcopy
import functools
import getpass
import hashlib
import inspect
from numbers import Number
import os
import tempfile
from types import MethodType
import diskcache
import numpy as np
from pymor.core.base import ImmutableObject
from pymor.core.defaults import defaults, defaults_changes
from pymor.core.exceptions import CacheKeyGenerationError
from pymor.core.logger import getLogger
from pymor.core.pickle import dumps
from pymor.parameters.base import Mu, Parameters
@atexit.register
def cleanup_non_persistent_regions():
    """At interpreter shutdown, drop all cache entries of non-persistent regions."""
    non_persistent = (region for region in cache_regions.values() if not region.persistent)
    for region in non_persistent:
        region.clear()
def _safe_filename(old_name):
return ''.join(x for x in old_name if (x.isalnum() or x in '._- '))
class CacheRegion:
    """Base class for all pyMOR cache regions.

    Concrete subclasses (e.g. memory- or disk-backed regions) implement
    :meth:`get`, :meth:`set` and :meth:`clear`.

    Attributes
    ----------
    persistent
        If `True`, cache entries are kept between multiple
        program runs.
    """

    # Non-persistent by default; persistent subclasses override this.
    persistent = False

    def get(self, key):
        """Return cache entry for given key.

        Parameters
        ----------
        key
            The key for the cache entry.

        Returns
        -------
        `(True, entry)`
            in case the `key` has been found in the cache region.
        `(False, None)`
            in case the `key` is not present in the cache region.
        """
        raise NotImplementedError

    def set(self, key, value):
        """Set cache entry for `key` to given `value`.

        This method is usually called only once for
        any given `key` (with the exemption of issues
        due to concurrency).
        """
        raise NotImplementedError

    def clear(self):
        """Clear the entire cache region."""
        raise NotImplementedError
class MemoryRegion(CacheRegion):
    """In-memory cache region holding at most `max_keys` entries.

    When the region is full, the entry inserted first is evicted (FIFO).
    """

    NO_VALUE = {}

    def __init__(self, max_keys):
        self.max_keys = max_keys
        self._cache = OrderedDict()

    def get(self, key):
        try:
            entry = self._cache[key]
        except KeyError:
            return False, None
        return True, deepcopy(entry)

    def set(self, key, value):
        if key in self._cache:
            getLogger('pymor.core.cache.MemoryRegion').warning('Key already present in cache region, ignoring.')
            return
        # Evict the oldest entry once the region is full.
        if len(self._cache) == self.max_keys:
            self._cache.popitem(last=False)
        # Deep copies isolate cached values from later mutation by callers.
        self._cache[key] = deepcopy(value)

    def clear(self):
        self._cache = OrderedDict()
class DiskRegion(CacheRegion):
    """Disk-backed cache region built on the `diskcache` library.

    Parameters
    ----------
    path
        Directory in which the cache data is stored.
    max_size
        Maximum region size in bytes; passed to diskcache as its
        `size_limit`.
    persistent
        If `True`, entries survive between program runs; otherwise the
        region is cleared on creation.
    """

    # Sentinel distinguishing 'key missing' from a stored value of None.
    _NO_VALUE = object()

    def __init__(self, path, max_size, persistent):
        self.path = path
        self.max_size = max_size
        self.persistent = persistent
        self._cache = diskcache.Cache(path)
        self._cache.reset('size_limit', int(max_size))
        if not persistent:
            self.clear()

    def get(self, key):
        # Single lookup instead of `in` followed by `get`: avoids touching
        # the cache twice and the race where the entry is culled in between
        # (which could previously return `(True, None)` for a missing key).
        value = self._cache.get(key, default=self._NO_VALUE)
        if value is self._NO_VALUE:
            return False, None
        return True, value

    def set(self, key, value):
        if key in self._cache:
            getLogger('pymor.core.cache.DiskRegion').warning('Key already present in cache region, ignoring.')
            return
        self._cache.set(key, value)

    def clear(self):
        """Remove all entries from the on-disk cache."""
        self._cache.clear()
@defaults('disk_path', 'disk_max_size', 'persistent_path', 'persistent_max_size', 'memory_max_keys')
def default_regions(disk_path=os.path.join(tempfile.gettempdir(), 'pymor.cache.' + getpass.getuser()),
                    disk_max_size=1024 ** 3,
                    persistent_path=os.path.join(tempfile.gettempdir(), 'pymor.persistent.cache.' + getpass.getuser()),
                    persistent_max_size=1024 ** 3,
                    memory_max_keys=1000):
    """Create the default 'disk', 'persistent' and 'memory' cache regions.

    The maximum sizes may be given as ints (bytes) or as strings with a
    'K', 'M' or 'G' suffix.
    """

    def parse_size_string(size):
        # '<n>K' / '<n>M' / '<n>G' -> bytes; anything else must parse as int.
        factors = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3}
        if size[-1] in factors:
            return int(size[:-1]) * factors[size[-1]]
        return int(size)

    if isinstance(disk_max_size, str):
        disk_max_size = parse_size_string(disk_max_size)
    # BUG FIX: size strings were previously only parsed for disk_max_size,
    # so a persistent_max_size such as '1G' crashed in DiskRegion's
    # int(max_size) conversion.
    if isinstance(persistent_max_size, str):
        persistent_max_size = parse_size_string(persistent_max_size)

    cache_regions['disk'] = DiskRegion(path=disk_path, max_size=disk_max_size, persistent=False)
    cache_regions['persistent'] = DiskRegion(path=persistent_path, max_size=persistent_max_size, persistent=True)
    cache_regions['memory'] = MemoryRegion(memory_max_keys)
# Registry of available cache regions; populated lazily by default_regions()
# on first cached call (see CacheableObject._cached_method_call).
cache_regions = {}

# Environment override: PYMOR_CACHE_DISABLE=1 disables caching globally and
# also keeps enable_caching() from re-enabling it.
_caching_disabled = int(os.environ.get('PYMOR_CACHE_DISABLE', 0)) == 1
if _caching_disabled:
    getLogger('pymor.core.cache').warning('caching globally disabled by environment')
def enable_caching():
    """Globally enable caching.

    The `PYMOR_CACHE_DISABLE` environment variable takes precedence:
    if it is set to `1`, caching stays disabled.
    """
    global _caching_disabled
    env_value = os.environ.get('PYMOR_CACHE_DISABLE', 0)
    _caching_disabled = int(env_value) == 1
def disable_caching():
    """Globally disable caching.

    Caching can be re-enabled with :func:`enable_caching`, unless it is
    disabled via the `PYMOR_CACHE_DISABLE` environment variable.
    """
    global _caching_disabled
    _caching_disabled = True
def clear_caches():
    """Clear every cache region registered in `cache_regions`."""
    for region in cache_regions.values():
        region.clear()
class CacheableObject(ImmutableObject):
    """Base class for anything that wants to use our built-in caching.

    Attributes
    ----------
    cache_region
        Name of the :class:`CacheRegion` to use. Must correspond to a key in
        the :attr:`cache_regions` dict. If `None` or `'none'`, caching
        is disabled.
    cache_id
        Identifier for the object instance on which a cached method is called.
    """

    cache_region = None
    cache_id = None

    def disable_caching(self):
        """Disable caching for this instance."""
        # NOTE(review): assignment goes through __dict__, presumably to
        # bypass ImmutableObject's restrictions on attribute assignment
        # -- confirm against pymor.core.base.
        self.__dict__['cache_region'] = None
        self.__dict__['cache_id'] = None

    def enable_caching(self, region, cache_id=None):
        """Enable caching for this instance.

        .. warning::
            Note that using :meth:`~pymor.core.base.ImmutableObject.with_`
            will reset :attr:`cache_region` and :attr:`cache_id` to their class
            defaults.

        Parameters
        ----------
        region
            Name of the |CacheRegion| to use. Must correspond to a key in
            the :attr:`cache_regions` dict. If `None` or `'none'`, caching
            is disabled.
        cache_id
            Identifier for the object instance on which a cached method is called.
            Must be specified when `region` is :attr:`~CacheRegion.persistent`.
            When `region` is not :attr:`~CacheRegion.persistent` and no `cache_id`
            is given, the object's :attr:`~pymor.core.base.BasicObject.uid`
            is used instead.

        Raises
        ------
        ValueError
            If the chosen region is persistent but no `cache_id` was given.
        """
        self.__dict__['cache_id'] = cache_id
        if region in (None, 'none'):
            self.__dict__['cache_region'] = None
        else:
            self.__dict__['cache_region'] = region
            # The check below can only be performed if the region is already
            # registered; otherwise the assert in _cached_method_call applies.
            r = cache_regions.get(region, None)
            if r and r.persistent and cache_id is None:
                raise ValueError('For persistent CacheRegions a cache_id has to be specified.')

    def cached_method_call(self, method, *args, **kwargs):
        """Call a given `method` and cache the return value.

        This method can be used as an alternative to the :func:`cached`
        decorator.

        Parameters
        ----------
        method
            The method that is to be called. This has to be a method
            of `self`.
        args
            Positional arguments for `method`.
        kwargs
            Keyword arguments for `method`

        Returns
        -------
        The (possibly cached) return value of `method(*args, **kwargs)`.
        """
        assert isinstance(method, MethodType)
        # Fast path: caching turned off globally or for this instance.
        if _caching_disabled or self.cache_region is None:
            return method(*args, **kwargs)
        params = inspect.signature(method).parameters
        # *args-style methods cannot be mapped onto a canonical kwargs dict
        # for key building, so they are not supported.
        if any(v.kind == v.VAR_POSITIONAL for v in params.values()):
            raise NotImplementedError
        argnames = list(params.keys())[1:]  # first argument is self
        defaults = {k: v.default for k, v in params.items() if v.default is not v.empty}
        return self._cached_method_call(method, False, argnames, defaults, args, kwargs)

    def _cached_method_call(self, method, pass_self, argnames, defaults, args, kwargs):
        # Lazily set up the default cache regions on first use.
        if not cache_regions:
            default_regions()
        try:
            region = cache_regions[self.cache_region]
        except KeyError:
            raise KeyError(f'No cache region "{self.cache_region}" found')
        # id for self
        assert self.cache_id or not region.persistent
        self_id = self.cache_id or self.uid
        # ensure that passing a value as positional or keyword argument does not matter
        kwargs.update(zip(argnames, args))
        # ensure the values of optional parameters enter the cache key
        if defaults:
            kwargs = dict(defaults, **kwargs)
        key = build_cache_key((method.__name__, self_id, kwargs))
        found, value = region.get(key)
        if found:
            # Cache entries also record the pyMOR defaults at creation time,
            # so stale results can at least be warned about.
            value, cached_defaults_changes = value
            if cached_defaults_changes != defaults_changes():
                getLogger('pymor.core.cache').warning('pyMOR defaults have been changed. Cached result may be wrong.')
            return value
        else:
            self.logger.debug(f'creating new cache entry for {self.__class__.__name__}.{method.__name__}')
            value = method(self, **kwargs) if pass_self else method(**kwargs)
            region.set(key, (value, defaults_changes()))
            return value
def cached(function):
    """Decorator to make a method of `CacheableObject` actually cached."""
    params = inspect.signature(function).parameters
    # *args-style methods cannot be mapped onto a canonical kwargs dict.
    if any(p.kind == p.VAR_POSITIONAL for p in params.values()):
        raise NotImplementedError
    # Skip 'self'; the remaining names map positional args to keywords.
    argnames = [name for name in params][1:]
    defaults = {name: p.default for name, p in params.items() if p.default is not p.empty}

    @functools.wraps(function)
    def wrapper(self, *args, **kwargs):
        if _caching_disabled or self.cache_region is None:
            return function(self, *args, **kwargs)
        return self._cached_method_call(function, True, argnames, defaults, args, kwargs)

    return wrapper
# type(None) is not exposed as a builtin name; bind it once for the type
# dispatch in build_cache_key below.
NoneType = type(None)
def build_cache_key(obj):
def transform_obj(obj):
t = type(obj)
if t in (NoneType, bool, int, float, str, bytes):
return obj
elif t is np.ndarray:
if obj.dtype == object:
raise CacheKeyGenerationError('Cannot | |
by_value=True, missing=None, data_key="paymentState"
)
class Meta:
unknown = marshmallow.EXCLUDE
@marshmallow.post_load
def post_load(self, data):
del data["action"]
return types.StagedOrderChangePaymentStateAction(**data)
class StagedOrderChangeShipmentStateActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderChangeShipmentStateAction`."""

    shipment_state = marshmallow_enum.EnumField(
        types.ShipmentState, by_value=True, missing=None, data_key="shipmentState"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderChangeShipmentStateAction(**data)
class StagedOrderChangeTaxCalculationModeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderChangeTaxCalculationModeAction`."""

    tax_calculation_mode = marshmallow_enum.EnumField(
        types.TaxCalculationMode, by_value=True, data_key="taxCalculationMode"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderChangeTaxCalculationModeAction(**data)
class StagedOrderChangeTaxModeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderChangeTaxModeAction`."""

    tax_mode = marshmallow_enum.EnumField(
        types.TaxMode, by_value=True, data_key="taxMode"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderChangeTaxModeAction(**data)
class StagedOrderChangeTaxRoundingModeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderChangeTaxRoundingModeAction`."""

    tax_rounding_mode = marshmallow_enum.EnumField(
        types.RoundingMode, by_value=True, data_key="taxRoundingMode"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderChangeTaxRoundingModeAction(**data)
class StagedOrderImportCustomLineItemStateActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderImportCustomLineItemStateAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    state = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.ItemStateSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        many=True,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderImportCustomLineItemStateAction(**data)
class StagedOrderImportLineItemStateActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderImportLineItemStateAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    state = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.ItemStateSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        many=True,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderImportLineItemStateAction(**data)
class StagedOrderRemoveCustomLineItemActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveCustomLineItemAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveCustomLineItemAction(**data)
class StagedOrderRemoveDeliveryActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveDeliveryAction`."""

    delivery_id = marshmallow.fields.String(allow_none=True, data_key="deliveryId")

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveDeliveryAction(**data)
class StagedOrderRemoveDiscountCodeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveDiscountCodeAction`."""

    discount_code = marshmallow.fields.Nested(
        nested="commercetools.schemas._discount_code.DiscountCodeReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        data_key="discountCode",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveDiscountCodeAction(**data)
class StagedOrderRemoveItemShippingAddressActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveItemShippingAddressAction`."""

    address_key = marshmallow.fields.String(allow_none=True, data_key="addressKey")

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveItemShippingAddressAction(**data)
class StagedOrderRemoveLineItemActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveLineItemAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    quantity = marshmallow.fields.Integer(allow_none=True, missing=None)
    external_price = marshmallow.fields.Nested(
        nested="commercetools.schemas._common.MoneySchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalPrice",
    )
    external_total_price = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalLineItemTotalPriceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTotalPrice",
    )
    shipping_details_to_remove = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ItemShippingDetailsDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="shippingDetailsToRemove",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveLineItemAction(**data)
class StagedOrderRemoveParcelFromDeliveryActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemoveParcelFromDeliveryAction`."""

    parcel_id = marshmallow.fields.String(allow_none=True, data_key="parcelId")

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemoveParcelFromDeliveryAction(**data)
class StagedOrderRemovePaymentActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderRemovePaymentAction`."""

    payment = marshmallow.fields.Nested(
        nested="commercetools.schemas._payment.PaymentReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderRemovePaymentAction(**data)
class StagedOrderSchema(OrderSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrder`."""

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Unlike the action schemas there is no discriminator to remove here.
        return types.StagedOrder(**data)
class StagedOrderSetBillingAddressActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetBillingAddressAction`."""

    address = marshmallow.fields.Nested(
        nested="commercetools.schemas._common.AddressSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetBillingAddressAction(**data)
class StagedOrderSetCountryActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCountryAction`."""

    country = marshmallow.fields.String(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCountryAction(**data)
class StagedOrderSetCustomFieldActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomFieldAction`."""

    name = marshmallow.fields.String(allow_none=True)
    value = marshmallow.fields.Raw(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomFieldAction(**data)
class StagedOrderSetCustomLineItemCustomFieldActionSchema(
    StagedOrderUpdateActionSchema
):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomLineItemCustomFieldAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    name = marshmallow.fields.String(allow_none=True)
    value = marshmallow.fields.Raw(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomLineItemCustomFieldAction(**data)
class StagedOrderSetCustomLineItemCustomTypeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomLineItemCustomTypeAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    type = marshmallow.fields.Nested(
        nested="commercetools.schemas._type.TypeReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )
    fields = FieldContainerField(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomLineItemCustomTypeAction(**data)
class StagedOrderSetCustomLineItemShippingDetailsActionSchema(
    StagedOrderUpdateActionSchema
):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomLineItemShippingDetailsAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    shipping_details = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ItemShippingDetailsDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="shippingDetails",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomLineItemShippingDetailsAction(**data)
class StagedOrderSetCustomLineItemTaxAmountActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomLineItemTaxAmountAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    external_tax_amount = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalTaxAmountDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTaxAmount",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomLineItemTaxAmountAction(**data)
class StagedOrderSetCustomLineItemTaxRateActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomLineItemTaxRateAction`."""

    custom_line_item_id = marshmallow.fields.String(
        allow_none=True, data_key="customLineItemId"
    )
    external_tax_rate = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalTaxRateDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTaxRate",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomLineItemTaxRateAction(**data)
class StagedOrderSetCustomShippingMethodActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomShippingMethodAction`."""

    shipping_method_name = marshmallow.fields.String(
        allow_none=True, data_key="shippingMethodName"
    )
    shipping_rate = marshmallow.fields.Nested(
        nested="commercetools.schemas._shipping_method.ShippingRateDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        data_key="shippingRate",
    )
    tax_category = marshmallow.fields.Nested(
        nested="commercetools.schemas._tax_category.TaxCategoryReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="taxCategory",
    )
    external_tax_rate = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalTaxRateDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTaxRate",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomShippingMethodAction(**data)
class StagedOrderSetCustomTypeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomTypeAction`."""

    type = marshmallow.fields.Nested(
        nested="commercetools.schemas._type.TypeReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )
    fields = FieldContainerField(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomTypeAction(**data)
class StagedOrderSetCustomerEmailActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomerEmailAction`."""

    email = marshmallow.fields.String(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomerEmailAction(**data)
class StagedOrderSetCustomerGroupActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomerGroupAction`."""

    customer_group = marshmallow.fields.Nested(
        nested="commercetools.schemas._customer_group.CustomerGroupReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="customerGroup",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomerGroupAction(**data)
class StagedOrderSetCustomerIdActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetCustomerIdAction`."""

    customer_id = marshmallow.fields.String(
        allow_none=True, missing=None, data_key="customerId"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetCustomerIdAction(**data)
class StagedOrderSetDeliveryAddressActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetDeliveryAddressAction`."""

    delivery_id = marshmallow.fields.String(allow_none=True, data_key="deliveryId")
    address = marshmallow.fields.Nested(
        nested="commercetools.schemas._common.AddressSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetDeliveryAddressAction(**data)
class StagedOrderSetDeliveryItemsActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetDeliveryItemsAction`."""

    delivery_id = marshmallow.fields.String(allow_none=True, data_key="deliveryId")
    items = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.DeliveryItemSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        many=True,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetDeliveryItemsAction(**data)
class StagedOrderSetLineItemCustomFieldActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemCustomFieldAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    name = marshmallow.fields.String(allow_none=True)
    value = marshmallow.fields.Raw(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemCustomFieldAction(**data)
class StagedOrderSetLineItemCustomTypeActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemCustomTypeAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    type = marshmallow.fields.Nested(
        nested="commercetools.schemas._type.TypeReferenceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )
    fields = FieldContainerField(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemCustomTypeAction(**data)
class StagedOrderSetLineItemPriceActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemPriceAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    external_price = marshmallow.fields.Nested(
        nested="commercetools.schemas._common.MoneySchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalPrice",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemPriceAction(**data)
class StagedOrderSetLineItemShippingDetailsActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemShippingDetailsAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    shipping_details = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ItemShippingDetailsDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="shippingDetails",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemShippingDetailsAction(**data)
class StagedOrderSetLineItemTaxAmountActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemTaxAmountAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    external_tax_amount = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalTaxAmountDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTaxAmount",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemTaxAmountAction(**data)
class StagedOrderSetLineItemTaxRateActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemTaxRateAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    external_tax_rate = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalTaxRateDraftSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTaxRate",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemTaxRateAction(**data)
class StagedOrderSetLineItemTotalPriceActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLineItemTotalPriceAction`."""

    line_item_id = marshmallow.fields.String(allow_none=True, data_key="lineItemId")
    external_total_price = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.ExternalLineItemTotalPriceSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="externalTotalPrice",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLineItemTotalPriceAction(**data)
class StagedOrderSetLocaleActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetLocaleAction`."""

    locale = marshmallow.fields.String(allow_none=True, missing=None)

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetLocaleAction(**data)
class StagedOrderSetOrderNumberActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetOrderNumberAction`."""

    order_number = marshmallow.fields.String(
        allow_none=True, missing=None, data_key="orderNumber"
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetOrderNumberAction(**data)
class StagedOrderSetOrderTotalTaxActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetOrderTotalTaxAction`."""

    external_total_gross = marshmallow.fields.Nested(
        nested="commercetools.schemas._common.MoneySchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        data_key="externalTotalGross",
    )
    external_tax_portions = marshmallow.fields.Nested(
        nested="commercetools.schemas._cart.TaxPortionSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        many=True,
        missing=None,
        data_key="externalTaxPortions",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetOrderTotalTaxAction(**data)
class StagedOrderSetParcelItemsActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetParcelItemsAction`."""

    parcel_id = marshmallow.fields.String(allow_none=True, data_key="parcelId")
    items = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.DeliveryItemSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        many=True,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetParcelItemsAction(**data)
class StagedOrderSetParcelMeasurementsActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetParcelMeasurementsAction`."""

    parcel_id = marshmallow.fields.String(allow_none=True, data_key="parcelId")
    measurements = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.ParcelMeasurementsSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetParcelMeasurementsAction(**data)
class StagedOrderSetParcelTrackingDataActionSchema(StagedOrderUpdateActionSchema):
    """Marshmallow schema for :class:`commercetools.types.StagedOrderSetParcelTrackingDataAction`."""

    parcel_id = marshmallow.fields.String(allow_none=True, data_key="parcelId")
    tracking_data = marshmallow.fields.Nested(
        nested="commercetools.schemas._order.TrackingDataSchema",
        unknown=marshmallow.EXCLUDE,
        allow_none=True,
        missing=None,
        data_key="trackingData",
    )

    class Meta:
        unknown = marshmallow.EXCLUDE

    @marshmallow.post_load
    def post_load(self, data):
        # Drop the polymorphic "action" discriminator; it is not a
        # constructor argument of the target type.
        del data["action"]
        return types.StagedOrderSetParcelTrackingDataAction(**data)
class StagedOrderSetReturnPaymentStateActionSchema(StagedOrderUpdateActionSchema):
    "Marshmallow schema for :class:`commercetools.types.StagedOrderSetReturnPaymentStateAction`."
    # Serialized as "returnItemId" in the JSON payload.
    return_item_id = marshmallow.fields.String(allow_none=True, data_key="returnItemId")
    # Enum serialized by value (serialized as "paymentState").
    payment_state = marshmallow_enum.EnumField(
        types.ReturnPaymentState, by_value=True, data_key="paymentState"
    )
    class Meta:
        unknown = marshmallow.EXCLUDE
    @marshmallow.post_load
    def post_load(self, data):
        # Drop the discriminator field before constructing the typed action.
        del data["action"]
        return types.StagedOrderSetReturnPaymentStateAction(**data)
class StagedOrderSetReturnShipmentStateActionSchema(StagedOrderUpdateActionSchema):
"Marshmallow schema for :class:`commercetools.types.StagedOrderSetReturnShipmentStateAction`."
return_item_id = marshmallow.fields.String(allow_none=True, data_key="returnItemId")
shipment_state = marshmallow_enum.EnumField(
types.ReturnShipmentState, by_value=True, | |
<gh_stars>1-10
""" Read in cluster from Nbody simulations or generate an Nbody cluster
"""
__author__ = "<NAME>"
__all__ = [
"load_cluster",
"advance_cluster",
]
import numpy as np
try:
from galpy.util import conversion
except:
import galpy.util.bovy_conversion as conversion
import os, struct
from .cluster import StarCluster
from .operations import *
from .orbit import initialize_orbit
# Try Importing AMUSE. Only necessary for _get_amuse_particles
try:
import amuse.units.units as u
from amuse.io import read_set_from_file
except:
pass
def load_cluster(
    ctype="snapshot",
    units="pckms",
    origin="cluster",
    ofile=None,
    orbit=None,
    filename=None,
    particles=None,
    load_function=None,
    **kwargs,
):
    """Load a StarCluster snapshot from a generic code output

    Parameters
    __________
    ctype : str
        Type of file being loaded
        Currently supports:
            - nbody6
            - nbody6se
            - nbody6pp or nbody6++
            - nemo or gyrfalcon
            - snaptrim
            - snapauto
            - clustertools
            - snapshot
            - astropy_table
    units : str
        units of input data (default: pckms)
    origin : str
        origin of input data (default: cluster)
    ofile : file
        an already opened file containing orbit information (default: None)
    orbit : class
        a galpy orbit to be used for the StarCluster's orbital information (default: None)
    filename : str
        name of file to be opened (optional - necessary if no defaults assigned to ctype) (default: None)
    particles : particles
        AMUSE particle dataset (default: None)
        or `~astropy.table.Table` instance if `ctype` is "astropy_table".
    load_function : function
        use a custom function to load data (default : None)

    Returns
    -------
    cluster : class
        StarCluster

    Other Parameters
    ----------------
    ofilename : str
        orbit filename if ofile or orbit is not given
    ounits : str
        units of orbital information (else assumed equal to StarCluster.units)
    nsnap : int
        if a specific snapshot is to be read in instead of starting from zero
    nzfill : int
        value for zfill when reading and writing snapshots (Default: 5)
    delimiter : str
        choice of delimiter when reading ascii/csv files (Default: ',')
    wdir : str
        working directory of snapshots if not current directory
    snapdir : str
        directory of snapshot (Default: './')
    snapbase : str
        base for snapshot filename (Default: '')
    snapend : str
        end for snapshot filename (Default: '')
    skiprows : int
        number of rows to skip when reading in snapshot (Default: 0)
    initialize : bool
        initialize a galpy orbit after reading in orbital information (default: False)
    projected : bool
        calculate projected values as well as 3D values (Default: True)
    sortstars : bool
        sort stars in order from closest to the origin to the farthest (default: True)
    column_mapper : dict
        see _get_astropy_table
    verbose : bool
        print additional information to screen while loading (default : False)
    give : str
        set what parameters are read in from nemo/gyrfalcon (default: 'mxv')
        Currently only accepts 'mxvpqael' as an alternative.
    deltat : integer
        number of nbody timesteps forward to advance to next Nbody6++ timestep (default = 1)

    History
    _______
    2018 - Written - Webb (UofT)
    """
    wdir = kwargs.get("wdir", "./")
    if wdir[-1] != '/':
        wdir += '/'

    filename = _get_filename(filename, **kwargs)

    initialize = kwargs.get("initialize", False)

    if "ofilename" in kwargs and ofile is None:
        ofile = open(wdir + kwargs["ofilename"], "r")

    if load_function is not None:
        ctype = 'custom'
        if particles is not None:
            cluster = load_function(ctype=ctype, units=units, origin=origin, ofile=ofile, orbit=orbit, particles=particles, **kwargs)
        elif filename is not None:
            cluster = load_function(ctype=ctype, units=units, origin=origin, ofile=ofile, orbit=orbit, filename=filename, **kwargs)
        else:
            cluster = load_function(ctype=ctype, units=units, origin=origin, ofile=ofile, orbit=orbit, **kwargs)

    elif ctype == "nbody6":
        # With stellar evolution turned ON, read in OUT3, OUT33, fort.82 and fort.83.
        if os.path.isfile("%sOUT3" % wdir):
            out3 = open("%sOUT3" % wdir, "rb")
        else:
            out3 = None

        if os.path.isfile("%sOUT33" % wdir):
            out33 = open("%sOUT33" % wdir, "rb")
        else:
            out33 = None

        if os.path.isfile("%sfort.82" % wdir):
            fort82 = open("%sfort.82" % wdir, "r")
        else:
            fort82 = None

        if os.path.isfile("%sfort.83" % wdir):
            fort83 = open("%sfort.83" % wdir, "r")
        else:
            fort83 = None

        cluster = _get_nbody6(out3, out33, fort82=fort82, fort83=fort83, ofile=ofile, advance=False, **kwargs)

    elif ctype == "nbody6pp" or ctype == 'nbody6++':
        nsnap = kwargs.get("nsnap", 0)
        deltat = kwargs.pop('deltat', 1)

        if os.path.isfile("%sconf.3_%s" % (wdir, str(nsnap))):
            conf3 = open("%sconf.3_%s" % (wdir, str(nsnap)), "rb")
        else:
            conf3 = None

        if os.path.isfile("%sbev.82_%s" % (wdir, str(nsnap))):
            bev82 = open("%sbev.82_%s" % (wdir, str(nsnap)), "r")
        else:
            bev82 = None

        if os.path.isfile("%ssev.83_%s" % (wdir, str(nsnap))):
            sev83 = open("%ssev.83_%s" % (wdir, str(nsnap)), "r")
        else:
            sev83 = None

        cluster = _get_nbody6pp(conf3, bev82=bev82, sev83=sev83, ofile=ofile, advance=False, deltat=deltat, **kwargs)

    elif ctype == "gyrfalcon" or ctype == 'nemo':
        # Read in snapshot from gyrfalcon. (A leftover debug print of the
        # filename was removed here.)
        filein = open(filename, "r")
        cluster = _get_gyrfalcon(filein, "WDunits", "galaxy", advance=False, **kwargs)

    elif ctype == 'amuse':
        if filename is not None:
            filetype = kwargs.pop("filetype", "hdf5")
            particles = read_set_from_file(filename, filetype)
        cluster = _get_amuse_particles(particles, units=units, origin=origin, ofile=ofile, **kwargs)

    elif ctype == "snapshot":
        # Read in standard generic snapshot
        col_names = kwargs.pop("col_names", ["m", "x", "y", "z", "vx", "vy", "vz"])
        col_nums = kwargs.pop("col_nums", [0, 1, 2, 3, 4, 5, 6])
        cluster = _get_snapshot(
            filename=filename,
            col_names=col_names,
            col_nums=col_nums,
            units=units,
            origin=origin,
            ofile=ofile,
            advance=False,
            **kwargs,
        )

    elif ctype.lower() == "astropy_table":
        column_mapper = kwargs.pop("column_mapper", None)
        # Read data from astropy table
        cluster = _get_astropy_table(
            particles,
            column_mapper=column_mapper,
            units=units,
            origin=origin,
            ofile=ofile,
            **kwargs
        )

    else:
        print("NO CTYPE GIVEN")
        return 0

    if ofile is not None:
        cluster.ofilename = ofile.name.split('/')[-1]

    # Add galpy orbit if given
    if orbit is not None:
        cluster.orbit = orbit

        if cluster.units == "pckms":
            # tphys is in Myr for physical units; convert to galpy time units.
            t = (cluster.tphys / 1000.0) / conversion.time_in_Gyr(ro=8.0, vo=220.0)
            cluster.add_orbit(
                orbit.x(t) * 1000.0,
                orbit.y(t) * 1000.0,
                orbit.z(t) * 1000.0,
                orbit.vx(t),
                orbit.vy(t),
                orbit.vz(t),
            )
        elif cluster.units == "kpckms":
            # BUGFIX: `t` was never assigned in this branch, raising a
            # NameError whenever a kpckms cluster was loaded with an orbit.
            # tphys is assumed to be in Myr here as well (same conversion as
            # the pckms branch) -- TODO confirm against the file readers.
            t = (cluster.tphys / 1000.0) / conversion.time_in_Gyr(ro=8.0, vo=220.0)
            cluster.add_orbit(
                orbit.x(t),
                orbit.y(t),
                orbit.z(t),
                orbit.vx(t),
                orbit.vy(t),
                orbit.vz(t),
            )
        elif cluster.units == "nbody":
            t = (cluster.tphys * cluster.tbar / 1000.0) / conversion.time_in_Gyr(
                ro=8.0, vo=220.0
            )
            cluster.add_orbit(
                orbit.x(t) * 1000.0 / cluster.rbar,
                orbit.y(t) * 1000.0 / cluster.rbar,
                orbit.z(t) * 1000.0 / cluster.rbar,
                orbit.vx(t) / cluster.vbar,
                orbit.vy(t) / cluster.vbar,
                orbit.vz(t) / cluster.vbar,
            )
        elif cluster.units == "galpy":
            t = cluster.tphys
            cluster.add_orbit(
                orbit.x(t) / 8.0,
                orbit.y(t) / 8.0,
                orbit.z(t) / 8.0,
                orbit.vx(t) / 220.0,
                orbit.vy(t) / 220.0,
                orbit.vz(t) / 220.0,
            )

        units0, origin0, rorder0, rorder_origin0 = save_cluster(cluster)
        cluster.to_cluster(sortstars=False)
        cluster.find_centre()
        return_cluster(cluster, units0, origin0, rorder0, rorder_origin0)
    elif initialize:
        initialize_orbit(cluster)

    return cluster
def advance_cluster(
cluster,
load_function=None,
ofile=None,
orbit=None,
filename=None,
**kwargs,
):
"""Advance a loaded StarCluster snapshot to the next timestep
- ofile or orbit need to be provded again, same as load_cluster.
- Be sure that advance is set to True so next line of orbit file is read in
- if last snapshot has been reached, returns an empty StarCluster
Parameters
----------
cluster - class
StarCluster to be advanced
load_function : function
use a custom function to load data (default : None)
ofile : file
an already opened file containing orbit information (default: None)
orbit : class
a galpy orbit to be used for the StarCluster's orbital information (default: None)
filename :str
name of file to be opened (optional - necessary if no defaults assigned to ctype) (default: None)
Returns
-------
cluster : class
StarCluster
Other Parameters
----------------
Same as load_cluster except for:
deltat : integer
number of nbody timesteps forward to advance to next Nbody6++ timestep (default = 1)
History
-------
2018 - Written - Webb (UofT)
"""
advance_kwargs, kwargs = _get_advanced_kwargs(cluster, **kwargs)
filename=_get_filename(filename,**advance_kwargs)
wdir = advance_kwargs.get("wdir", "./")
if wdir[-1] != '/':
wdir+='/'
# Continue reading in cluster opened in _get_cluster()
if load_function is not None:
ctype='custom'
if filename is not None:
cluster=load_function(ctype=ctype,units=cluster.units_init,origin=cluster.origin_init,ofile=ofile,orbit=orbit,filename=filename,advance=True,**advance_kwargs,**kwargs)
else:
cluster=load_function(ctype=ctype,units=cluster.units_init,origin=cluster.origin_init,ofile=ofile,orbit=orbit,advance=True,**advance_kwargs,**kwargs)
elif cluster.ctype == "nbody6":
cluster = _get_nbody6(
cluster.sfile, cluster.bfile, cluster.bsefile, cluster.ssefile, advance=True, **advance_kwargs
)
elif cluster.ctype == "nbody6pp" or cluster.ctype == "nbody6++":
deltat=kwargs.pop('deltat',1)
nsnap = advance_kwargs.pop("nsnap") + deltat - 1
if os.path.isfile("%sconf.3_%s" % (wdir,str(nsnap))):
conf3 = open("%sconf.3_%s" % (wdir,str(nsnap)), "rb")
else:
conf3=None
if os.path.isfile("%sbev.82_%s" % (wdir,str(nsnap))):
bev82 = open("%sbev.82_%s" % (wdir,str(nsnap)), "r")
else:
bev82=None
if os.path.isfile("%ssev.3_%s" % (wdir,str(nsnap))):
sev83 = open("%ssev.3_%s" % (wdir,str(nsnap)), "r")
else:
sev83=None
cluster = _get_nbody6pp(conf3, bev82=bev82, sev83=sev83, ofile=ofile, advance=True,nsnap=nsnap,deltat=deltat,**advance_kwargs)
elif cluster.ctype == 'nbody6':
cluster = _get_nbody6(
cluster.sfile, cluster.bfile, cluster.bsefile, cluster.ssefile, advance=True, **advance_kwargs
)
elif cluster.ctype == "gyrfalcon" or cluster.ctype=="nemo":
if filename is None:
cluster = _get_gyrfalcon(
cluster.sfile,
units="WDunits",
origin="galaxy",
ofile=ofile,
advance=True,
**advance_kwargs
)
else:
filein = open(filename, "r")
cluster = _get_gyrfalcon(
filein,
units="WDunits",
origin="galaxy",
ofile=ofile,
advance=True,
**advance_kwargs
)
elif cluster.ctype == "snapshot":
col_names = kwargs.pop("col_names", ["m", "x", "y", "z", "vx", "vy", "vz"])
col_nums = kwargs.pop("col_nums", [0, 1, 2, 3, 4, 5, 6])
cluster = _get_snapshot(
filename=filename,
col_names=col_names,
col_nums=col_nums,
units=cluster.units_init,
origin=cluster.origin_init,
ofile=ofile,
advance=True,
**advance_kwargs
)
else:
cluster = StarCluster(ctype=cluster.ctype,units=cluster.units_init,origin=cluster.origin_init,**advance_kwargs)
# Check for restart
if cluster.ntot == 0.0:
#print('NTOT = 0',cluster.wdir,advance_kwargs.get('wdir','./'))
try:
wdir = cluster.wdir + "cont/"
except:
wdir = "./cont/"
try:
ofilename = ofile.name.split('/')[-1]
ofile=None
except:
ofile = None
| |
<reponame>NCRAR/psiaudio
import pytest
from collections import Counter, deque
import numpy as np
from psiaudio.calibration import FlatCalibration
from psiaudio.pipeline import extract_epochs
from psiaudio.queue import FIFOSignalQueue, InterleavedFIFOSignalQueue
from psiaudio.stim import Cos2EnvelopeFactory, ToneFactory
# Nominal trial presentation rate in trials per second used by these tests.
rate = 76.0
# Inter-stimulus interval in seconds: 1/rate, rounded to 5 decimal places.
isi = np.round(1 / rate, 5)
def make_tone(fs, frequency=250, duration=5e-3):
    """Build a cosine-squared gated tone factory (5 ms, 0.5 ms rise by default).

    The tone uses a flat attenuation calibration at level 0 so the waveform
    amplitude is deterministic across tests.
    """
    attenuation_cal = FlatCalibration.as_attenuation()
    carrier = ToneFactory(fs=fs, level=0, frequency=frequency,
                          calibration=attenuation_cal)
    envelope = Cos2EnvelopeFactory(fs=fs, start_time=0, rise_time=0.5e-3,
                                   duration=duration, input_factory=carrier)
    return envelope
def make_queue(fs, ordering, frequencies, trials, duration=5e-3, isi=isi):
    """Create a signal queue preloaded with one gated tone per frequency.

    Returns a tuple ``(queue, added, removed, keys, tones)`` where ``added``
    and ``removed`` are deques capturing the queue's 'added'/'removed'
    notifications, ``keys`` are the queue keys (one per frequency) and
    ``tones`` are the corresponding tone factories.
    """
    if ordering == 'FIFO':
        queue = FIFOSignalQueue(fs)
    elif ordering == 'interleaved':
        queue = InterleavedFIFOSignalQueue(fs)
    else:
        raise ValueError(f'Unrecognized queue ordering {ordering}')

    added = deque()
    removed = deque()
    queue.connect(added.append, 'added')
    queue.connect(removed.append, 'removed')

    keys, tones = [], []
    # The queue takes the offset-to-onset delay, not the onset-to-onset ISI.
    delay = max(isi - duration, 0)
    for frequency in frequencies:
        tone = make_tone(fs, frequency=frequency, duration=duration)
        key = queue.append(tone, trials, delay,
                           metadata={'frequency': frequency})
        keys.append(key)
        tones.append(tone)
    return queue, added, removed, keys, tones
def test_long_tone_queue(fs):
    """Two interleaved 1 s tones, popped in 0.25 s chunks, must alternate:
    rows 0/2 are the first tone, rows 1/3 the second, and the two tones must
    differ from each other."""
    queue, *_ = make_queue(fs, 'interleaved', [1e3, 5e3], 5, duration=1, isi=1)

    chunk_samples = round(fs * 0.25)
    chunks = [queue.pop_buffer(chunk_samples) for _ in range(16)]
    stacked = np.concatenate(chunks, axis=-1)
    stacked.shape = 4, -1

    assert stacked.shape == (4, round(fs))
    assert np.all(stacked[0] == stacked[2])
    assert np.all(stacked[1] == stacked[3])
    assert np.any(stacked[0] != stacked[1])
def test_fifo_queue_pause_with_requeue(fs):
    """End-to-end check of FIFO queue pause/resume with trial requeueing.

    Queues 100 trials each of a 1 kHz and a 5 kHz tone, pauses mid-stream
    (which removes not-yet-played trials and later requeues them), resumes,
    and verifies trial accounting, removal notifications and the extracted
    epoch waveforms (200 epochs total, matching the tone templates).
    """
    # Helper function to track number of remaining keys
    def _adjust_remaining(k1, k2, n):
        nk1 = min(k1, n)
        nk2 = min(n - nk1, k2)
        return k1 - nk1, k2 - nk2
    queue, conn, rem_conn, (k1, k2), (t1, t2) = \
        make_queue(fs, 'FIFO', [1e3, 5e3], 100)
    extractor_conn = deque()
    extractor_rem_conn = deque()
    queue.connect(extractor_conn.append, 'added')
    queue.connect(extractor_rem_conn.append, 'removed')
    # Generate the waveform template
    n_t1 = t1.n_samples_remaining()
    n_t2 = t2.n_samples_remaining()
    t1_waveform = t1.next(n_t1)
    t2_waveform = t2.next(n_t2)
    waveforms = []
    extractor = extract_epochs(fs=fs,
                               queue=extractor_conn,
                               removed_queue=extractor_rem_conn,
                               poststim_time=0,
                               buffer_size=0,
                               epoch_size=15e-3,
                               target=waveforms.extend)
    # Track number of trials remaining
    k1_left, k2_left = 100, 100
    samples = int(round(fs))
    # Since the queue uses the delay (between offset and onset of
    # consecutive segments), we need to calculate the actual ISI since it
    # may have been rounded to the nearest sample.
    delay_samples = round((isi - t1.duration) * fs)
    duration_samples = round(t1.duration * fs)
    total_samples = duration_samples + delay_samples
    actual_isi = total_samples / fs
    ###########################################################################
    # First, queue up 2 seconds worth of trials
    ###########################################################################
    waveform = queue.pop_buffer(samples * 2)
    n_queued = np.floor(2 / actual_isi) + 1
    # t1 trials play first (FIFO), so the 5 kHz block starts after all 100
    # 1 kHz trials, i.e. at sample 100 * total_samples.
    t1_lb = 0
    t2_lb = 100 * total_samples
    t2_lb = int(t2_lb)
    assert np.all(waveform[t1_lb:t1_lb + duration_samples] == t1_waveform)
    assert np.all(waveform[t2_lb:t2_lb + duration_samples] == t2_waveform)
    assert len(conn) == np.ceil(2 / actual_isi)
    assert len(rem_conn) == 0
    keys = [i['key'] for i in conn]
    assert set(keys) == {k1, k2}
    assert set(keys[:100]) == {k1}
    assert set(keys[100:]) == {k2}
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    conn.clear()
    ###########################################################################
    # Now, pause
    ###########################################################################
    # Pausing should remove all epochs queued up after 0.5s. After sending
    # the first waveform to the extractor, we generate a new waveform to
    # verify that no additional trials are queued and send that to the
    # extractor.
    queue.pause(round(0.5 * fs) / fs)
    extractor.send(waveform[:round(0.5 * fs)])
    # We need to add 1 to account for the very first trial.
    n_queued = int(np.floor(2 / actual_isi)) + 1
    n_kept = int(np.floor(0.5 / actual_isi)) + 1
    # Now, fix the counters
    k1_left, k2_left = _adjust_remaining(100, 100, n_kept)
    # This is the total number that were removed when we paused.
    n_removed = n_queued - n_kept
    # Subtract 1 because we haven't fully captured the last trial that
    # remains in the queue because the epoch_size was chosen such that the
    # end of the epoch to be extracted is after 0.5s.
    n_captured = n_kept - 1
    assert len(waveforms) == n_captured
    # Doing this will capture the final epoch.
    waveform = queue.pop_buffer(samples)
    assert np.all(waveform == 0)
    extractor.send(waveform)
    assert len(waveforms) == (n_captured + 1)
    # Verify removal event is properly notifying the timestamp
    rem_t0 = np.array([i['t0'] for i in rem_conn])
    assert np.all(rem_t0 >= 0.5)
    assert (rem_t0[0] % actual_isi) == pytest.approx(0, 0.1 / fs)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    assert len(conn) == 0
    assert len(rem_conn) == n_removed
    rem_count = Counter(i['key'] for i in rem_conn)
    assert rem_count[k1] == 100 - n_kept
    assert rem_count[k2] == n_queued - 100
    conn.clear()
    rem_conn.clear()
    # Resume at 1.5 s and pop another second worth of samples.
    queue.resume(samples * 1.5 / fs)
    waveform = queue.pop_buffer(samples)
    n_queued = np.floor(1 / actual_isi) + 1
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    extractor.send(waveform)
    assert len(conn) == np.floor(1 / actual_isi) + 1
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    assert len(conn) == np.floor(1 / actual_isi) + 1
    keys += [i['key'] for i in conn]
    conn.clear()
    waveform = queue.pop_buffer(5 * samples)
    n_queued = np.floor(5 / actual_isi) + 1
    k1_left, k2_left = _adjust_remaining(k1_left, k2_left, n_queued)
    extractor.send(waveform)
    assert queue.remaining_trials(k1) == k1_left
    assert queue.remaining_trials(k2) == k2_left
    keys += [i['key'] for i in conn]
    # We requeued 1.5 second worth of trials so need to factor this because
    # keys (from conn) did not remove the removed keys.
    assert len(keys) == (200 + n_removed)
    # However, the extractor is smart enough to handle cancel appropriately
    # and should only have the 200 we originally intended.
    assert len(waveforms) == 200
    # This should capture the 1-sample bug that sometimes occurs when using
    # int() instead of round() with quirky sample rates (e.g., like with the
    # RZ6).
    n = len(t1_waveform)
    waveforms = np.vstack(waveforms)
    t1_waveforms = waveforms[:100]
    t2_waveforms = waveforms[100:]
    assert np.all(t1_waveforms[:, :n] == t1_waveform)
    assert np.all(t2_waveforms[:, :n] == t2_waveform)
def test_queue_isi_with_pause(fs):
    """
    Verifies that queue generates samples at the expected ISI and also verifies
    pause functionality works as expected.
    """
    queue, conn, _, _, (t1,) = make_queue(fs, 'FIFO', [250], 500)
    duration = 1
    samples = round(duration * fs)
    queue.pop_buffer(samples)
    # +1 accounts for the trial that starts at t=0.
    expected_n = int(duration / isi) + 1
    assert len(conn) == expected_n
    # Pause is after `duration` seconds
    queue.pause()
    waveform = queue.pop_buffer(samples)
    # While paused the queue must emit pure silence and enqueue no trials.
    assert np.sum(waveform ** 2) == 0
    assert len(conn) == int(duration / isi) + 1
    # Resume after `duration` seconds. Note that tokens resume *immediately*.
    queue.resume()
    queue.pop_buffer(samples)
    assert len(conn) == np.ceil(2 * duration / isi)
    queue.pop_buffer(samples)
    assert len(conn) == np.ceil(3 * duration / isi)
    times = [u['t0'] for u in conn]
    assert times[0] == 0
    all_isi = np.diff(times)
    # Since the queue uses the delay (between offset and onset of
    # consecutive segments), we need to calculate the actual ISI since it
    # may have been rounded to the nearest sample.
    actual_isi = round((isi - t1.duration) * fs) / fs + t1.duration
    # We paused the playout, so this means that we have a very long delay in
    # the middle of the queue. Check for this delay, ensure that there's only
    # one ISI with this delay and then verify that all other ISIs are the
    # expected ISI given the tone pip duration.
    expected_max_isi = round((duration + actual_isi) * fs) / fs
    assert all_isi.max() == expected_max_isi
    m = all_isi == all_isi.max()
    assert sum(m) == 1
    # Now, check that all other ISIs are as expected.
    expected_isi = round(actual_isi * fs) / fs
    np.testing.assert_almost_equal(all_isi[~m], expected_isi)
def test_fifo_queue_pause_resume_timing(fs):
    """After a pause/resume cycle, the first requeued trial must start at the
    resume time quantized to the sample grid."""
    n_trials = 20
    n_samples = int(fs)
    queue, added, *_ = make_queue(fs, 'FIFO', (1e3, 5e3), n_trials)

    queue.pop_buffer(n_samples)
    added.clear()

    queue.pause(0.1025)
    queue.pop_buffer(n_samples)
    queue.resume(0.6725)
    queue.pop_buffer(n_samples)

    start_times = [info['t0'] for info in added]
    assert start_times[0] == round(0.6725 * fs) / fs
def test_fifo_queue_ordering(fs):
    """All trials for the first tone must play before any trial of the second
    (FIFO ordering), the queue must report itself empty, and every extracted
    epoch must match its tone's template."""
    trials = 20
    samples = round(fs)
    queue, conn, _, (k1, k2), (t1, _) = \
        make_queue(fs, 'FIFO', (1e3, 5e3), trials)
    epoch_samples = round(t1.duration * fs)
    waveforms = []
    queue_empty = False
    # Callback fired by the extractor once the queue reports no more trials.
    def mark_empty():
        nonlocal queue_empty
        queue_empty = True
    extractor = extract_epochs(fs=fs,
                               queue=conn,
                               epoch_size=None,
                               poststim_time=0,
                               buffer_size=0,
                               target=waveforms.append,
                               empty_queue_cb=mark_empty)
    waveform = queue.pop_buffer(samples)
    extractor.send(waveform)
    assert queue_empty
    metadata = list(conn)
    for md in metadata[:trials]:
        assert k1 == md['key']
    for md in metadata[trials:]:
        assert k2 == md['key']
    waveforms = np.concatenate(waveforms, axis=0)
    assert waveforms.shape == (trials * 2, epoch_samples)
    for w in waveforms[:trials]:
        assert np.all(w == waveforms[0])
    for w in waveforms[trials:]:
        assert np.all(w == waveforms[trials])
    assert np.any(waveforms[0] != waveforms[trials])
def test_interleaved_fifo_queue_ordering(fs):
samples = round(fs)
trials = 20
queue, conn, _, (k1, k2), (t1, _) = \
make_queue(fs, 'interleaved', (1e3, 5e3), trials)
epoch_samples = round(t1.duration * fs)
| |
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import geopandas as gpd
from scipy import stats
from shapely.geometry import Polygon, MultiPoint
import json
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from branca.colormap import linear
import folium
import datetime
from math import floor, ceil
# class Inspection():
# def __init__(self):
# print("Initializing class 'Inspection'")
def skewness_num_variables(df):  # show_skewness_num_variables
    """Return a one-column DataFrame ('Skew') with the skewness of every
    float64 column of ``df``, sorted from most to least skewed."""
    float_columns = df.select_dtypes(include='float64').columns
    skew_values = df[float_columns].skew().sort_values(ascending=False)
    return pd.DataFrame({'Skew': skew_values})
def missing_values_per_variable(df, percent=100, dropCol=False):  # sum_missing_values
    """Summarize missing values per column.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data. NOTE: when ``dropCol`` is True, columns whose
        missing-value percentage exceeds ``percent`` are dropped from ``df``
        **in place**.
    percent : float
        Threshold (0-100); columns at or below it are reported (default 100,
        i.e. report everything).
    dropCol : bool
        Drop columns above the threshold (default False).

    Returns
    -------
    pandas.DataFrame
        Columns ['column', 'missing_values', 'missing_values(%)'], sorted by
        missing_values descending. Empty (but well-formed) when no column
        passes the filter.
    """
    records = []
    row_count = df.shape[0]
    # BUGFIX: iterate over a snapshot of the column names -- the original
    # iterated `df` directly while dropping columns from it in place.
    for column in list(df.columns):
        n_missing = df[column].isna().sum()
        pct_missing = n_missing / row_count * 100
        if pct_missing <= percent:
            records.append({'column': column,
                            'missing_values': n_missing,
                            'missing_values(%)': pct_missing})
        elif dropCol:
            print('Column dropped: ', column, ', missing values(%): ', pct_missing)
            df.drop([column], axis=1, inplace=True)
    if not records:
        # BUGFIX: sort_values raised KeyError on an empty record list.
        return pd.DataFrame(columns=['column', 'missing_values', 'missing_values(%)'])
    return (pd.DataFrame(records)
            .sort_values(by='missing_values', ascending=False)
            .reset_index(drop=True))
def missing_values_per_track(df):
    """Per-track missing-value summary.

    Groups ``df`` by 'track.id' and returns a tuple ``(counts, percents)``:
    ``counts`` holds the number of missing values per column within each
    track, ``percents`` the same as a percentage of the track's rows.
    (An unused local that collected float64 column names was removed.)
    """
    grouped = df.groupby('track.id')
    counts = grouped.apply(lambda g: g.isna().sum())
    percents = grouped.apply(lambda g: g.isna().sum() / len(g) * 100)
    return counts, percents
def get_classified_correlations(df, method):
    """Compute all pairwise correlations and bin them by strength.

    Parameters
    ----------
    df : pandas.DataFrame
        Input data; correlations are computed over its numeric columns.
    method : str
        Passed to ``DataFrame.corr`` ('pearson', 'kendall' or 'spearman').

    Returns
    -------
    tuple of pandas.DataFrame
        ``(all_coeffs, very_strong, strong, moderate, weak)``; each has
        columns ['column', 'index', 'coefficient'] sorted descending.
        ``all_coeffs`` lists every off-diagonal pair in both orderings; the
        strength bins are built from the lower triangle so each pair appears
        once. Coefficients of exactly +/-1.0 are excluded from the bins.
    """
    def _to_frame(records):
        # Guard: sort_values raised KeyError on an empty record list.
        if not records:
            return pd.DataFrame(columns=['column', 'index', 'coefficient'])
        return (pd.DataFrame(records)
                .sort_values(by='coefficient', ascending=False)
                .reset_index(drop=True))

    corr = df.corr(method=method)

    all_coeffs = []
    for column in corr:
        for row in corr[column].index:
            c = corr.at[row, column]
            if c < 1.0:
                all_coeffs.append({'column': column, 'index': row, 'coefficient': c})

    # Keep only the lower triangle so each unordered pair is binned once.
    # BUGFIX: np.bool was removed in NumPy 1.24 -- use the builtin bool.
    tri = corr.where(np.tril(np.ones(corr.shape)).astype(bool))

    very_strong, strong, moderate, weak = [], [], [], []
    for column in tri:
        for row in tri[column].index:
            c = tri.at[row, column]
            record = {'column': column, 'index': row, 'coefficient': c}
            # NaN (masked upper triangle) fails every comparison below.
            if 0.8 <= c < 1.0 or -1.0 < c <= -0.8:
                very_strong.append(record)
            if 0.6 <= c < 0.8 or -0.8 < c <= -0.6:
                strong.append(record)
            if 0.4 <= c < 0.6 or -0.6 < c <= -0.4:
                moderate.append(record)
            if -0.4 < c < 0.4:
                weak.append(record)

    return (_to_frame(all_coeffs), _to_frame(very_strong),
            _to_frame(strong), _to_frame(moderate), _to_frame(weak))
def get_correlation(df, method, variable1, variable2):
    """Return the pairwise-correlation row for (variable1, variable2).

    Computes ``df.corr(method=method)`` and returns a (possibly empty)
    DataFrame with columns ['v1', 'v2', 'coefficient'] filtered to the
    requested pair. Every off-diagonal pair is stored in both orderings, so
    the argument order does not matter in practice.

    BUGFIX: removed an unused lower-triangle masking step that relied on
    ``np.bool`` (removed in NumPy 1.24) and crashed before the return; also
    guards against an empty correlation table.
    """
    corr = df.corr(method=method)
    records = []
    for column in corr:
        for row in corr[column].index:
            c = corr.at[row, column]
            if c < 1.0:
                records.append({'v1': column, 'v2': row, 'coefficient': c})
    if not records:
        return pd.DataFrame(columns=['v1', 'v2', 'coefficient'])
    coeffs = (pd.DataFrame(records)
              .sort_values(by='coefficient', ascending=False)
              .reset_index(drop=True))
    return coeffs.loc[(coeffs['v1'] == variable1) & (coeffs['v2'] == variable2)]
def correlation_heatmap_triangle(df, method, figsize=(20, 16)):
    """Draw a lower-triangle correlation heatmap for the float64 columns.

    Parameters
    ----------
    df : pandas.DataFrame
    method : str
        Correlation method passed to ``DataFrame.corr``.
    figsize : tuple
        Matplotlib figure size (default (20, 16)).
    """
    numeric = df.select_dtypes(['float64'])
    coefficient = numeric.corr(method=method)
    # Mask the upper triangle so each pair is shown once.
    # BUGFIX: np.bool was removed in NumPy 1.24; use the builtin bool.
    coefficient = coefficient.where(np.tril(np.ones(coefficient.shape)).astype(bool))
    plt.figure(figsize=figsize)
    sns.heatmap(coefficient, annot=True, vmin=-1, vmax=1.0, cmap="RdBu_r")
def get_single_track(df, track_id):
    """Return a copy of the rows belonging to the track with ``track_id``."""
    return df.groupby('track.id').get_group(track_id).copy()
def show_dublicated_tracks(df):
    """Print the ids of tracks containing fully duplicated measurement rows
    and return those rows.  (The "dublicated" spelling is kept because it is
    part of the public API.)"""
    comparison_columns = ['geometry', 'Engine Load.value', 'Calculated MAF.value',
                          'Speed.value', 'CO2.value', 'Intake Pressure.value', 'Rpm.value',
                          'Intake Temperature.value', 'Consumption (GPS-based).value',
                          'GPS Altitude.value', 'Throttle Position.value', 'GPS Bearing.value',
                          'Consumption.value', 'GPS Accuracy.value',
                          'CO2 Emission (GPS-based).value', 'GPS Speed.value',
                          'track.length', 'track.begin', 'track.end', 'sensor.type',
                          'sensor.engineDisplacement', 'sensor.model', 'sensor.id',
                          'sensor.fuelType', 'sensor.constructionYear', 'sensor.manufacturer']
    # keep=False marks *every* member of a duplicate group, not just later ones.
    duplicate_mask = df[comparison_columns].duplicated(keep=False)
    duplicate_ids = df[duplicate_mask]['track.id'].unique().tolist()
    result = df.copy().loc[df['track.id'].isin(duplicate_ids)]
    print('Dublicated tracks:', result['track.id'].unique().tolist())
    return result
def count_tracks(df):
    """Print the number of distinct 'track.id' values in ``df``."""
    unique_ids = df['track.id'].unique()
    print(len(unique_ids))
def show_units(df):
    """Print every '<variable>.unit' column name together with its first
    value, and return the matching column index.

    Keyword Arguments:
        df {Geodataframe} -- point input
    """
    unit_columns = df.filter(like='.unit').columns
    for unit_col in unit_columns:
        if unit_col in df:
            series = df[unit_col]
            print(series.name, series.iloc[0])
    return unit_columns
def get_units(df):
    """Return the names of all '<variable>.unit' columns as a plain list.

    Keyword Arguments:
        df {Geodataframe} -- point input
    """
    return [name for name in df.filter(like='.unit').columns if name in df]
def get_categories(df):
    """Print each column name followed by its array of unique values."""
    for name in df.columns:
        print(name, df[name].unique())
def get_sensor_columns(df):
    """Return ``(sensor_df, sensor_column_names)`` for every column whose
    name contains 'sensor.'."""
    sensor_names = list(df.filter(like='sensor.', axis=1).columns)
    return df[sensor_names], sensor_names
def get_columns(df, name=''):
    """Return ``(column_names, sub_df)`` for every column whose name
    contains the substring ``name`` (all columns when ``name`` is '')."""
    selected = list(df.filter(like=name, axis=1).columns)
    return selected, df[selected]
def plot_tracks(points_df, column):
    """
    Aim:
        Visualize phenomena of tracks as timeserie in Linechart, in which each line represents one single track
    Keyword Arguments:
        points_df {Geodataframe} -- point input
        column {str} -- name of the value column to plot on the y axis
    Returns:
        Chart is shown

    Note: mutates ``points_df`` in place by adding 'datetime' and 'index'
    columns (requires a 'time' column parseable by ``pd.to_datetime``).
    """
    # Add datetime to data frame
    points_df['datetime'] = pd.to_datetime(points_df['time'])
    points_df['index']=points_df.index
    fig = px.line(points_df, x="index", y=column, color="track.id",
                  line_group="track.id", hover_name="datetime")
    fig.update_traces(mode='lines+markers')
    fig.show()
def plot_point_values(points, value = None):
    """ This function is based on a function from the envirocar fork of the github user 'annaformaniuk'.
    Aim:
        show points on a map
    Keyword Arguments:
        points {GeoDataFrame} -- points input
        value {string} -- column value to use for colouring
    Returns:
        No Return

    Notes:
        - Mutates ``points`` in place by adding 'lat' and 'lng' columns
          derived from the 'geometry' column.
        - The hover data is hard-coded to "CO2.value", so the input must
          contain that column regardless of ``value``.
    """
    points['lat'] = points['geometry'].apply(lambda coord: coord.y)
    points['lng'] = points['geometry'].apply(lambda coord: coord.x)
    if value is not None:
        # Visualizing points of the selected variable
        fig = px.scatter_mapbox(points, lat="lat", lon="lng", hover_data=["CO2.value"],
                                color=value,
                                color_continuous_scale=px.colors.sequential.Reds,
                                title=value + " visualisation", zoom=8,
                                hover_name="id")
    else:
        fig = px.scatter_mapbox(points, lat="lat", lon="lng", hover_data=["CO2.value"],
                                title= " Spatial distribution or requested tracks", zoom=8,
                                hover_name="id")
    fig.update_layout(mapbox_style="open-street-map",
                      margin={"r": 5, "t": 50, "l": 10, "b": 5})
    fig.show()
def plot_scatter(df, column1, column2, alpha=0.2):
    """Scatter-plot ``column1`` against ``column2`` using pandas plotting."""
    subset = df[['track.id', column1, column2]]
    subset.plot(kind='scatter', x=column1, y=column2, alpha=alpha)
def plot_normality_with_qqplot(point_df, column):
    '''
    Aim:
        create q-q plot to inspect normality of distribution of selected variable
    Keyword Arguments:
        point_df {Geodataframe} -- points input
        column {str} -- variable name
    Output: Q-Q plot
    '''
    # fit=False plots raw sample-vs-theoretical quantile pairs without a fitted line.
    plot = stats.probplot(point_df[column], dist="norm", plot=plt, fit = False)
    plt.title(column)
    plt.show()
def plot_hist(df, column=''):
    """Plot the distribution of ``column`` (or of ``df`` itself when no
    column name is given) with seaborn.

    NOTE(review): ``sns.distplot`` is deprecated in seaborn >= 0.11 and has
    been removed in recent releases -- confirm the pinned seaborn version or
    migrate to ``histplot``/``displot``.
    """
    if column !='':
        x = df[column]
    else:
        x = df
    sns.distplot(x)
def plot_linear_regression(variableName1, variableName2, title=''):
    """Draw a seaborn regression plot of two value sequences with a title."""
    axes = sns.regplot(x=variableName1, y=variableName2)
    axes.set_title(title)
def plot_distribution_s(points_df, column, column_gps = None):
    """
    Aim:
        Plot of two distributions in a single figure for visually comparing the shapes of the two distributions
    Keyword Arguments:
        points_df {GeoDataFrame} -- the GeoDataFrame containing the measurements
        column {str} -- the column name of measurement of interest,e.g. 'Speed.value'
        column_gps {str} -- the column name of measurement of same phenomena but measured based on GPS, e.g. 'GPS speed.value'
    Return:
        No Return, instead a plot is displayed
    """
    if column_gps is not None:
        # Overlay both KDEs in the same axes for visual comparison.
        sns.kdeplot(points_df[column], shade=True)
        sns.kdeplot(points_df[column_gps], shade=True)
    else:
        # Single-series case: plot only the primary column.
        sns.kdeplot(points_df[column], shade=True)
def st_cube_simple(points):
    """ To plot a space-time cube of one trajectory. Checks for the start time
    and calculates seconds passed from it for every next point
    Keyword Arguments:
        points {dataframe} -- A Pandas dataframe of a trajectory
    Returns:
        No Return

    Notes:
        - Expects a 'time' column formatted as '%Y-%m-%dT%H:%M:%S' and a
          'geometry' column of shapely points; adds 'lat', 'lng' and
          'time_seconds' columns to ``points`` in place.
        - NOTE(review): ``fig.gca(projection='3d')`` was removed in
          matplotlib 3.6 and ``canvas.set_window_title`` is deprecated --
          confirm the pinned matplotlib version.
    """
    # Seconds elapsed between a timestamp string and the trajectory start.
    def seconds_from_start(x, start):
        date_time_obj = datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S')
        seconds = (date_time_obj-start).total_seconds()
        return int(seconds)
    points['lat'] = points['geometry'].apply(lambda coord: coord.y)
    points['lng'] = points['geometry'].apply(lambda coord: coord.x)
    start_time = datetime.datetime.strptime(
        points.time.iloc[0], '%Y-%m-%dT%H:%M:%S')
    points['time_seconds'] = np.vectorize(seconds_from_start)(
        np.array(points.time.values.tolist()), start_time)
    # plot the space-time cube
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot(points['lng'], points['lat'], points['time_seconds'])
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('Seconds since start')
    fig.canvas.set_window_title('Space-Time Cube')
    plt.show()
def plot_pair_correlation(points_df, column_1, column_2,
                          sort_by='id', regression=False):
    """ To plot a pairwise relationship in a dataset.
    Special case for the Acceleration values to see difference
    (if any) between accelerating and braking.
    Keyword Arguments:
        points_df {dataframe} -- A Pandas dataframe of a trajectory
        column_1, column_2 {string} -- names of 2 columns to analyse
        sort_by {string} -- 'id' or 'temperature'; selects the hue grouping
        regression {boolean} -- True draws regression plots (sns.lmplot),
            False draws pair plots (sns.pairplot)
    Returns:
        No Return
    """
    if sort_by == 'temperature':
        # bin intake temperature and use the bins as the hue grouping
        bins = [-10, 0, 5, 10, 20, 30, 40]
        data = points_df.copy()
        data['Intake Temperature.value'] = \
            data['Intake Temperature.value'].astype(int)
        data['binned_temp'] = pd.cut(data['Intake Temperature.value'], bins)
        hue = 'binned_temp'
    else:
        data = points_df
        hue = 'track.id'
    if (column_1 == "Acceleration.value" or
            column_2 == "Acceleration.value"):
        # split into accelerating (> 0) and braking (< 0) samples and plot
        # each subset as its own figure
        subsets = [data[data["Acceleration.value"] > 0],
                   data[data["Acceleration.value"] < 0]]
    else:
        subsets = [data]
    for subset in subsets:
        _plot_pair(subset, column_1, column_2, hue, regression)
def _plot_pair(data, column_1, column_2, hue, regression):
    """ Draw a single regression or pair plot for one data subset. """
    if regression:
        # NOTE(review): the original code omitted palette="viridis" in exactly
        # one of its six plotting calls; it is applied consistently here.
        sns.lmplot(x=column_1, y=column_2, hue=hue,
                   data=data, palette="viridis")
    else:
        sns.pairplot(data, vars=[column_1, column_2], hue=hue)
def plot_distribution(points, column):
    """ Show three views of one column side by side in a single figure:
    a boxplot, a Gaussian kernel density estimate, and a histogram.
    """
    fig, axes = plt.subplots(
        1, 3, figsize=(15, 5), gridspec_kw={'width_ratios': [5, 5, 5]})
    box_ax, kde_ax, hist_ax = axes
    sns.boxplot(x=points[column], ax=box_ax)
    box_ax.set_title('Boxplot')
    sns.kdeplot(points[column], shade=True, color="r", ax=kde_ax)
    kde_ax.set_title('Gaussian kernel density estimate')
    sns.distplot(points[column], kde=False, ax=hist_ax)
    hist_ax.set_title('Histogram')
    fig.tight_layout()
    plt.show()
def plot_region(region, region_map, region_color, label):
""" To plot provided regions over the provided map
Keyword Arguments:
region {shapely Polygon} -- A shapely based Polygon
region_map {folium map} -- Map over which trajectories are to be
plotted
region_color {string} -- Name of the Color in String
label {String} -- Label for popup
Returns:
No Return
"""
region_coords = []
# to extract coordiantes from provided region
index = 0
for value in range(0, len(region.exterior.coords)):
temp = | |
cell size units
identifiers (:obj:`list` of :obj:`Identifier`): identifiers
conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
comments (:obj:`str`): comments
references (:obj:`list` of :obj:`Reference`): references
Related attributes:
* dfba_obj_expression (:obj:`DfbaObjectiveExpression`): dFBA objectie expression
* dfba_obj_species (:obj:`list` of :obj:`DfbaObjSpecies`): the components of this dFBA objective reaction
"""
id = SlugAttribute()
name = StringAttribute()
model = ManyToOneAttribute(Model, related_name='dfba_obj_reactions', verbose_related_name='dFBA objective reactions')
submodel = ManyToOneAttribute(Submodel, related_name='dfba_obj_reactions', verbose_related_name='dFBA objective reactions')
units = UnitAttribute(unit_registry,
choices=(unit_registry.parse_units('s^-1'),),
default=unit_registry.parse_units('s^-1'))
cell_size_units = UnitAttribute(unit_registry,
choices=(unit_registry.parse_units('l'), unit_registry.parse_units('gDCW')),
default=unit_registry.parse_units('l'))
identifiers = IdentifierManyToManyAttribute(related_name='dfba_obj_reactions',
verbose_related_name='dFBA objective reactions')
evidence = EvidenceManyToManyAttribute('Evidence', related_name='dfba_obj_reactions')
conclusions = ManyToManyAttribute('Conclusion', related_name='dfba_obj_reactions',
verbose_related_name='dFBA objective reactions')
comments = CommentAttribute()
references = ManyToManyAttribute('Reference', related_name='dfba_obj_reactions', verbose_related_name='dFBA objective reactions')
    class Meta(obj_tables.Model.Meta, ExpressionDynamicTermMeta):
        # column order used by the tabular (spreadsheet) representation
        attribute_order = ('id', 'name', 'submodel', 'units', 'cell_size_units',
                           'identifiers', 'evidence', 'conclusions', 'comments', 'references')
        indexed_attrs_tuples = (('id',), )
        verbose_name = 'dFBA objective reaction'
        # attribute holding the units of this term when used in expressions
        expression_term_units = 'units'
        merge = obj_tables.ModelMerge.append
        # related attributes traversed when collecting children for each scope
        children = {
            'submodel': ('dfba_obj_species', 'identifiers', 'evidence', 'conclusions', 'references'),
            'core_model': ('dfba_obj_species', 'identifiers', 'evidence', 'conclusions', 'references'),
        }
        # attributes retained for each encoding target (SBML export, simulation)
        child_attrs = {
            'sbml': ('id', 'name', 'model', 'submodel', 'units', 'cell_size_units', 'identifiers', 'comments'),
            'wc_sim': ('id', 'model', 'submodel', 'units', 'cell_size_units'),
        }
    def export_to_sbml(self, sbml_model):
        """ Add a dFBA objective reaction to a SBML model.
        DfbaObjReactions are added to the SBML model because they can be used in a dFBA submodel's
        objective function. In fact the default objective function is the submodel's dFBA objective reaction.
        Since SBML does not define DfbaObjReaction as a separate class, DfbaObjReactions are added
        to the SBML model as SBML reactions.
        CheckModel ensures that wc_lang DfbaObjReactions and Reactions have distinct ids.
        Args:
            sbml_model (:obj:`libsbml.Model`): SBML model
        Returns:
            :obj:`libsbml.Reaction`: SBML reaction
        """
        # create SBML reaction in SBML document
        sbml_rxn = call_libsbml(sbml_model.createReaction)
        call_libsbml(sbml_rxn.setReversible, False)
        sbml_plugin = call_libsbml(sbml_rxn.getPlugin, 'fbc')
        # make the flux effectively unbounded: attach -inf/+inf flux-bound
        # parameters through the SBML 'fbc' package plugin
        for bound, value in [('Lower', -float('inf')), ('Upper', float('inf'))]:
            param_id = "__DfbaObjReaction__Flux{}Bound__{}".format(self.id, bound)
            param = LibSbmlInterface.create_parameter(sbml_model, param_id, value, self.units)
            call_libsbml(getattr(sbml_plugin, 'set' + bound + 'FluxBound'), param_id)
        # id, name
        call_libsbml(sbml_rxn.setIdAttribute, self.gen_sbml_id())
        call_libsbml(sbml_rxn.setName, self.name)
        # comments
        LibSbmlInterface.set_commments(self, sbml_rxn)
        # return SBML reaction
        return sbml_rxn
def export_relations_to_sbml(self, sbml_model, sbml_rxn):
""" Add relationships to/from object to SBML reaction.
Args:
sbml_model (:obj:`libsbml.Model`): SBML model
sbml_rxn (:obj:`libsbml.Reaction`): SBML reaction
"""
# participants
for dfba_obj_species in self.dfba_obj_species:
if dfba_obj_species.value < 0:
sbml_part = call_libsbml(sbml_rxn.createReactant)
coeff = -dfba_obj_species.value
else:
sbml_part = call_libsbml(sbml_rxn.createProduct)
coeff = dfba_obj_species.value
id = dfba_obj_species.species.gen_sbml_id()
call_libsbml(sbml_part.setIdAttribute, dfba_obj_species.gen_sbml_id())
# call_libsbml(sbml_part.setName, dfba_obj_species.name) # because libSBML has a bug in SpeciesReference.setName
call_libsbml(sbml_part.setSpecies, id)
call_libsbml(sbml_part.setConstant, True)
call_libsbml(sbml_part.setStoichiometry, coeff)
LibSbmlInterface.set_annotations(dfba_obj_species, LibSbmlInterface.gen_nested_attr_paths([
'name', 'units', 'identifiers']), sbml_part)
LibSbmlInterface.set_commments(dfba_obj_species, sbml_part)
# units, identifiers
annots = ['units', 'cell_size_units', 'identifiers']
LibSbmlInterface.set_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml_rxn)
def import_from_sbml(self, sbml_rxn):
""" Load from SBML reaction
Args:
sbml (:obj:`libsbml.Reaction`): SBML reaction
"""
annots = []
# id, name
self.id = self.parse_sbml_id(call_libsbml(sbml_rxn.getIdAttribute))
self.name = call_libsbml(sbml_rxn.getName)
# units
annots.extend(['units', 'cell_size_units'])
# comments
LibSbmlInterface.get_commments(self, sbml_rxn)
# annotations
LibSbmlInterface.get_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml_rxn)
    def import_relations_from_sbml(self, sbml_rxn, objs):
        """ Load relationships from SBML reaction
        Args:
            sbml_rxn (:obj:`libsbml.Reaction`): SBML reaction
            objs (:obj:`dict`): dictionary that maps WC-Lang types to dictionaries that
                map the ids of WC-Lang objects to WC-Lang objects
        """
        # submodel
        # NOTE(review): assumes the model holds exactly one submodel — TODO confirm
        self.submodel = self.model.submodels[0]
        # participants: reactants contribute negative coefficients (sense -1),
        # products positive ones (sense +1)
        for num_func, get_func, sense in [(sbml_rxn.getNumReactants, sbml_rxn.getReactant, -1),
                                          (sbml_rxn.getNumProducts, sbml_rxn.getProduct, 1)]:
            for i_part in range(call_libsbml(num_func, returns_int=True)):
                sbml_part = call_libsbml(get_func, i_part)
                # resolve the participant's species from the already-imported objects
                species = objs[Species][Species.parse_sbml_id(call_libsbml(sbml_part.getSpecies))]
                value = sense * call_libsbml(sbml_part.getStoichiometry)
                dfba_obj_species = self.dfba_obj_species.create()
                dfba_obj_species.id = DfbaObjSpecies.parse_sbml_id(call_libsbml(sbml_part.getIdAttribute))
                # dfba_obj_species.name = call_libsbml(sbml_part.getName) # because libSBML has a bug in SpeciesReference.setName
                dfba_obj_species.model = self.model
                dfba_obj_species.species = species
                dfba_obj_species.value = value
                LibSbmlInterface.get_annotations(dfba_obj_species, LibSbmlInterface.gen_nested_attr_paths([
                    'name', 'units', 'identifiers']), sbml_part, objs)
                LibSbmlInterface.get_commments(dfba_obj_species, sbml_part)
        # identifiers
        LibSbmlInterface.get_annotations(self, LibSbmlInterface.gen_nested_attr_paths(['identifiers']), sbml_rxn, objs)
class ParameterRenamed(obj_tables.Model, SbmlModelMixin):
    """ ParameterRenamed — a numeric parameter of a model, with an optional
    standard error and units.
    Attributes:
        id (:obj:`str`): unique identifier per model/submodel
        name (:obj:`str`): name
        model (:obj:`Model`): model
        type (:obj:`pronto.Term`): parameter type
        value (:obj:`float`): value
        std (:obj:`float`): standard error of the value
        units (:obj:`unit_registry.Unit`): units of the value and standard error
        identifiers (:obj:`list` of :obj:`Identifier`): identifiers
        conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
        comments (:obj:`str`): comments
        references (:obj:`list` of :obj:`Reference`): references
    Related attributes:
        * density_compartment (:obj:`Compartment`): compartments whose density is represented by the parameter
        * observable_expressions (:obj:`list` of :obj:`ObservableExpression`): observable expressions
        * function_expressions (:obj:`list` of :obj:`FunctionExpression`): function expressions
        * rate_law_expressions (:obj:`list` of :obj:`RateLawExpression`): rate law expressions
        * stop_condition_expressions (:obj:`list` of :obj:`StopConditionExpression`): stop condition expressions
    """
    id = SlugAttribute()
    name = StringAttribute()
    model = ManyToOneAttribute(Model, related_name='parameters')
    # ontology term constraining the kind of parameter; optional (may be None)
    type = OntoTermAttribute(onto,
                             namespace='WC',
                             terms=onto['WC:parameter'].subclasses(),
                             default=None, none=True)
    value = FloatAttribute()
    # standard error must be non-negative
    std = FloatAttribute(min=0, verbose_name='Standard error')
    units = UnitAttribute(unit_registry)
    identifiers = IdentifierManyToManyAttribute(related_name='parameters')
    evidence = EvidenceManyToManyAttribute('Evidence', related_name='parameters')
    conclusions = ManyToManyAttribute('Conclusion', related_name='parameters')
    comments = CommentAttribute()
    references = ManyToManyAttribute('Reference', related_name='parameters')
    class Meta(obj_tables.Model.Meta, ExpressionStaticTermMeta):
        # column order used by the tabular representation
        attribute_order = ('id', 'name', 'type',
                           'value', 'std', 'units',
                           'identifiers', 'evidence', 'conclusions', 'comments', 'references')
        # related attributes traversed when collecting children for each scope
        children = {
            'submodel': ('identifiers', 'evidence', 'conclusions', 'references'),
            'core_model': ('identifiers', 'evidence', 'conclusions', 'references'),
        }
        # attributes exposed when the parameter appears inside expressions
        expression_term_value = 'value'
        expression_term_units = 'units'
        # attributes retained for each encoding target (SBML export, simulation)
        child_attrs = {
            'sbml': ('id', 'name', 'model', 'type', 'value', 'std', 'units', 'identifiers', 'comments'),
            'wc_sim': ('id', 'model', 'type', 'value', 'std', 'units'),
        }
    def export_to_sbml(self, sbml_model):
        """ Add this parameter to a SBML model.
        Args:
            sbml_model (:obj:`libsbml.Model`): SBML model
        Returns:
            :obj:`libsbml.Parameter`: SBML parameter
        """
        sbml_id = self.gen_sbml_id()
        sbml = LibSbmlInterface.create_parameter(sbml_model, sbml_id, self.value, self.units,
                                                 name=self.name)
        # comments are stored in the SBML notes
        LibSbmlInterface.set_commments(self, sbml)
        return sbml
    def export_relations_to_sbml(self, sbml_model, sbml):
        """ Add relationships to/from object to SBML parameter.
        Args:
            sbml_model (:obj:`libsbml.Model`): SBML model
            sbml (:obj:`libsbml.Parameter`): SBML parameter
        """
        # attributes without a native SBML representation go into annotations
        annots = ['type', 'std', 'identifiers']
        LibSbmlInterface.set_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml)
    def import_from_sbml(self, sbml):
        """ Load from SBML parameter
        Args:
            sbml (:obj:`libsbml.Parameter`): SBML parameter
        """
        self.id = self.parse_sbml_id(call_libsbml(sbml.getIdAttribute))
        self.name = call_libsbml(sbml.getName)
        self.value = call_libsbml(sbml.getValue)
        # the unit getter is passed so the interface can resolve the unit itself
        self.units = LibSbmlInterface.get_unit(sbml.getUnits)
        LibSbmlInterface.get_commments(self, sbml)
    def import_relations_from_sbml(self, sbml, objs):
        """ Load relationships from SBML parameter
        Args:
            sbml (:obj:`libsbml.Parameter`): SBML parameter
            objs (:obj:`dict`): dictionary that maps WC-Lang types to dictionaries that
                map the ids of WC-Lang objects to WC-Lang objects
        """
        # mirror of export_relations_to_sbml: recover the annotated attributes
        annots = ['type', 'std', 'identifiers']
        LibSbmlInterface.get_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml, objs)
class ObservationGenotype(obj_tables.Model, SbmlModelMixin):
    """ Genotype of an observation
    Attributes:
        taxon (:obj:`str`): taxon in which the observation was observed
        variant (:obj:`str`): genetic variant in which the observation was observed
    Related attributes:
        * genotype_observations (:obj:`list` of :obj:`Observation`): observations
    """
    taxon = StringAttribute()
    variant = StringAttribute()
    class Meta(obj_tables.Model.Meta):
        # rendered as a group of cells inside the parent's table
        table_format = TableFormat.multiple_cells
        unique_together = (('taxon', 'variant', ), )
        attribute_order = ('taxon', 'variant')
        children = {
            'submodel': (),
            'core_model': (),
        }
        child_attrs = {
            'sbml': (),
            'wc_sim': (),
        }
    def serialize(self):
        """ Generate string representation
        Returns:
            :obj:`str`: string representation
        """
        # double-underscore-delimited composite key
        return '__'.join([self.taxon, self.variant])
class ObservationEnv(obj_tables.Model, SbmlModelMixin):
    """ Environment of an observation
    Attributes:
        temp (:obj:`float`): temperature at which the observation was observed
        temp_units (:obj:`unit_registry.Unit`): temperature units
        ph (:obj:`float`): pH at which the observation was observed
        ph_units (:obj:`unit_registry.Unit`): pH units
        growth_media (:obj:`str`): growth media at which the observation was observed
        condition (:obj:`str`): experimental conditions (e.g. control)
    Related attributes:
        * env_observations (:obj:`list` of :obj:`Observation`): observations
    """
    # temp/ph may be NaN (unspecified); units are then allowed to be None
    temp = FloatAttribute(nan=True, verbose_name='Temperature')
    temp_units = UnitAttribute(unit_registry,
                               choices=(unit_registry.parse_units('celsius'),),
                               none=True,
                               verbose_name='Temperature units')
    ph = FloatAttribute(nan=True, verbose_name='pH')
    ph_units = UnitAttribute(unit_registry,
                             choices=(unit_registry.parse_units('dimensionless'),),
                             none=True,
                             verbose_name='pH units')
    growth_media = LongStringAttribute()
    condition = LongStringAttribute()
    class Meta(obj_tables.Model.Meta):
        # rendered as a group of cells inside the parent's table
        table_format = TableFormat.multiple_cells
        unique_together = (('temp', 'temp_units', 'ph', 'ph_units', 'growth_media', 'condition'), )
        attribute_order = ('temp', 'temp_units', 'ph', 'ph_units', 'growth_media', 'condition')
        children = {
            'submodel': (),
            'core_model': (),
        }
        child_attrs = {
            'sbml': (),
            'wc_sim': (),
        }
    def serialize(self):
        """ Generate string representation
        Returns:
            :obj:`str`: string representation
        """
        # double-underscore-delimited composite key over all six attributes
        return '__'.join([str(self.temp), str(self.temp_units),
                          str(self.ph), str(self.ph_units),
                          self.growth_media, self.condition])
    def validate(self):
        """ Determine if the environment is valid
        * temperature units are defined if the temperature is not None
        * pH units are defined if the pH is not None
        Returns:
            :obj:`InvalidObject` or None: `None` if the object is valid,
                otherwise return a list of errors as an instance of `InvalidObject`
        """
        # start from any errors reported by the base-class validation
        invalid_obj = super(ObservationEnv, self).validate()
        if invalid_obj:
            errors = invalid_obj.attributes
        else:
            errors = []
        # a concrete (non-None, non-NaN) temperature requires units
        if self.temp is not None and not isnan(self.temp) \
                and not isinstance(self.temp_units, unit_registry.Unit):
            errors.append(InvalidAttribute(self.Meta.attributes['temp_units'],
                                           ['Temperature units must be defined']))
        # likewise a concrete pH requires units
        if self.ph is not None and not isnan(self.ph) \
                and not isinstance(self.ph_units, unit_registry.Unit):
            errors.append(InvalidAttribute(self.Meta.attributes['ph_units'],
                                           ['pH units must be defined']))
        if errors:
            return InvalidObject(self, errors)
        return None
class Process(obj_tables.Model, SbmlModelMixin):
""" A process of an observation or conclusion
Attributes:
name (:obj:`str`): procedure which produced the conclusion
version (:obj:`str`): version of procedure which produced the conclusion
Related attributes:
* observation_analysis (:obj:`list` of :obj:`Observation`): observation
* observation_measurement (:obj:`list` of :obj:`Observation`): observation
* conclusions (:obj:`list` of :obj:`Conclusion`): conclusions
"""
name = LongStringAttribute()
version = StringAttribute()
| |
graph we expect to have built through
subdivision::
sage: paths2 = [2 + k, 5 + k, 9 + k]
sage: paths2 = map(graphs.PathGraph, paths2)
sage: g2 = Graph()
sage: for P in paths2:
....: g2 = g2 + P
sage: g.is_isomorphic(g2)
True
.. SEEALSO::
- :meth:`subdivide_edge` -- subdivides one edge
"""
if isinstance(edges, EdgesView):
edges = tuple(edges)
for e in edges:
self.subdivide_edge(e, k)
    def delete_edge(self, u, v=None, label=None):
        r"""
        Delete the edge from ``u`` to ``v``.
        This method returns silently if vertices or edge does not
        exist.
        INPUT: The following forms are all accepted:
        - G.delete_edge( 1, 2 )
        - G.delete_edge( (1, 2) )
        - G.delete_edges( [ (1, 2) ] )
        - G.delete_edge( 1, 2, 'label' )
        - G.delete_edge( (1, 2, 'label') )
        - G.delete_edges( [ (1, 2, 'label') ] )
        EXAMPLES::
            sage: G = graphs.CompleteGraph(9)
            sage: G.size()
            36
            sage: G.delete_edge( 1, 2 )
            sage: G.delete_edge( (3, 4) )
            sage: G.delete_edges( [ (5, 6), (7, 8) ] )
            sage: G.size()
            32
        ::
            sage: G.delete_edge( 2, 3, 'label' )
            sage: G.delete_edge( (4, 5, 'label') )
            sage: G.delete_edges( [ (6, 7, 'label') ] )
            sage: G.size()
            32
            sage: G.has_edge( (4, 5) ) # correct!
            True
            sage: G.has_edge( (4, 5, 'label') ) # correct!
            False
        ::
            sage: C = digraphs.Complete(9)
            sage: C.size()
            72
            sage: C.delete_edge( 1, 2 )
            sage: C.delete_edge( (3, 4) )
            sage: C.delete_edges( [ (5, 6), (7, 8) ] )
            sage: C.size()
            68
        ::
            sage: C.delete_edge( 2, 3, 'label' )
            sage: C.delete_edge( (4, 5, 'label') )
            sage: C.delete_edges( [ (6, 7, 'label') ] )
            sage: C.size() # correct!
            68
            sage: C.has_edge( (4, 5) ) # correct!
            True
            sage: C.has_edge( (4, 5, 'label') ) # correct!
            False
        """
        # Normalize the accepted call forms to (u, v, label): when only one
        # positional argument was given, it is an edge tuple of length 3 or 2.
        if label is None:
            if v is None:
                try:
                    u, v, label = u
                except Exception:
                    u, v = u
                    label = None
        # the backend performs the actual deletion (silently ignoring non-edges)
        self._backend.del_edge(u, v, label, self._directed)
    def delete_edges(self, edges):
        """
        Delete edges from an iterable container.
        EXAMPLES::
            sage: K12 = graphs.CompleteGraph(12)
            sage: K4 = graphs.CompleteGraph(4)
            sage: K12.size()
            66
            sage: K12.delete_edges(K4.edge_iterator())
            sage: K12.size()
            60
        ::
            sage: K12 = digraphs.Complete(12)
            sage: K4 = digraphs.Complete(4)
            sage: K12.size()
            132
            sage: K12.delete_edges(K4.edge_iterator())
            sage: K12.size()
            120
        """
        # bulk deletion is delegated to the graph backend
        self._backend.del_edges(edges, self._directed)
    def contract_edge(self, u, v=None, label=None):
        r"""
        Contract an edge from ``u`` to ``v``.
        This method returns silently if the edge does not exist.
        INPUT: The following forms are all accepted:
        - G.contract_edge( 1, 2 )
        - G.contract_edge( (1, 2) )
        - G.contract_edge( [ (1, 2) ] )
        - G.contract_edge( 1, 2, 'label' )
        - G.contract_edge( (1, 2, 'label') )
        - G.contract_edge( [ (1, 2, 'label') ] )
        EXAMPLES::
            sage: G = graphs.CompleteGraph(4)
            sage: G.contract_edge((0, 1)); G.edges()
            [(0, 2, None), (0, 3, None), (2, 3, None)]
            sage: G = graphs.CompleteGraph(4)
            sage: G.allow_loops(True); G.allow_multiple_edges(True)
            sage: G.contract_edge((0, 1)); G.edges()
            [(0, 2, None), (0, 2, None), (0, 3, None), (0, 3, None), (2, 3, None)]
            sage: G.contract_edge((0, 2)); G.edges()
            [(0, 0, None), (0, 3, None), (0, 3, None), (0, 3, None)]
        ::
            sage: G = graphs.CompleteGraph(4).to_directed()
            sage: G.allow_loops(True)
            sage: G.contract_edge(0, 1); G.edges()
            [(0, 0, None),
             (0, 2, None),
             (0, 3, None),
             (2, 0, None),
             (2, 3, None),
             (3, 0, None),
             (3, 2, None)]
        TESTS:
        Make sure loops don't get lost::
            sage: edgelist = [(0, 0, 'a'), (0, 1, 'b'), (1, 1, 'c')]
            sage: G = Graph(edgelist, loops=True, multiedges=True)
            sage: G.contract_edge(0, 1, 'b'); G.edges()
            [(0, 0, 'a'), (0, 0, 'c')]
            sage: D = DiGraph(edgelist, loops=True, multiedges=True)
            sage: D.contract_edge(0, 1, 'b'); D.edges()
            [(0, 0, 'a'), (0, 0, 'c')]
        With labeled edges::
            sage: G = graphs.CompleteGraph(4)
            sage: G.allow_loops(True); G.allow_multiple_edges(True)
            sage: for e in G.edges(sort=False):
            ....:     G.set_edge_label(e[0], e[1], (e[0] + e[1]))
            sage: G.contract_edge(0, 1); G.edges()
            [(0, 2, 2), (0, 2, 3), (0, 3, 3), (0, 3, 4), (2, 3, 5)]
            sage: G.contract_edge(0, 2, 4); G.edges()
            [(0, 2, 2), (0, 2, 3), (0, 3, 3), (0, 3, 4), (2, 3, 5)]
        """
        # standard code to allow 3 arguments or a single tuple:
        if label is None:
            if v is None:
                try:
                    u, v, label = u
                except Exception:
                    u, v = u
                    label = None
        # unlike delete_edge(), we must be careful about contracting non-edges
        if not self.has_edge(u, v, label):
            return
        self.delete_edge(u, v, label)
        # if the edge was a loop, stop
        # this could potentially leave isolated vertices
        if u == v:
            return
        if (self.allows_loops() and (self.allows_multiple_edges() or
                                     not self.has_edge(u, u))):
            # add loops
            # any remaining edge between u and v turns into a loop at the
            # merged vertex
            for x, y, l in self.edges_incident(v):
                if set([x, y]) == set([u, v]):
                    self.add_edge(u, u, l)
        # fold v into u; edges incident to v are re-attached to u
        self.merge_vertices([u, v])
    def contract_edges(self, edges):
        r"""
        Contract edges from an iterable container.
        If `e` is an edge that is not contracted but the vertices of `e` are
        merged by contraction of other edges, then `e` will become a loop.
        INPUT:
        - ``edges`` -- a list containing 2-tuples or 3-tuples that represent
          edges
        EXAMPLES::
            sage: G = graphs.CompleteGraph(4)
            sage: G.allow_loops(True); G.allow_multiple_edges(True)
            sage: G.contract_edges([(0, 1), (1, 2), (0, 2)]); G.edges()
            [(0, 3, None), (0, 3, None), (0, 3, None)]
            sage: G.contract_edges([(1, 3), (2, 3)]); G.edges()
            [(0, 3, None), (0, 3, None), (0, 3, None)]
            sage: G = graphs.CompleteGraph(4)
            sage: G.allow_loops(True); G.allow_multiple_edges(True)
            sage: G.contract_edges([(0, 1), (1, 2), (0, 2), (1, 3), (2, 3)]); G.edges()
            [(0, 0, None)]
        ::
            sage: D = digraphs.Complete(4)
            sage: D.allow_loops(True); D.allow_multiple_edges(True)
            sage: D.contract_edges([(0, 1), (1, 0), (0, 2)]); D.edges()
            [(0, 0, None),
             (0, 0, None),
             (0, 0, None),
             (0, 3, None),
             (0, 3, None),
             (0, 3, None),
             (3, 0, None),
             (3, 0, None),
             (3, 0, None)]
        TESTS:
        With non-edges in the input::
            sage: G = graphs.BullGraph(); G.add_edge(3, 4); G.edges()
            [(0, 1, None),
             (0, 2, None),
             (1, 2, None),
             (1, 3, None),
             (2, 4, None),
             (3, 4, None)]
            sage: G.contract_edges([(1, 3), (1, 4)]); G.edges()
            [(0, 1, None), (0, 2, None), (1, 2, None), (1, 4, None), (2, 4, None)]
        With loops in a digraph::
            sage: D = DiGraph([(0, 0), (0, 1), (1, 1)], loops=True, multiedges=True)
            sage: D.contract_edges([(1, 0)]); D.edges()
            [(0, 0, None), (0, 1, None), (1, 1, None)]
            sage: D.contract_edges([(0, 1)]); D.edges()
            [(0, 0, None), (0, 0, None)]
        ::
            sage: edgelist = [(0, 1, 0), (0, 1, 1), (0, 1, 2)]
            sage: G = Graph(edgelist, loops=True, multiedges=True)
            sage: G.contract_edges([(0, 1), (0, 1, 2)]); G.edges()
            Traceback (most recent call last):
            ...
            ValueError: edge tuples in input should have the same length
        ::
            sage: G = graphs.CompleteGraph(4)
            sage: G.allow_loops(True); G.allow_multiple_edges(True)
            sage: for e in G.edges(sort=False):
            ....:     G.set_edge_label(e[0], e[1], (e[0] + e[1]))
            sage: H = G.copy()
            sage: G.contract_edges([(0, 1), (0, 2)]); G.edges()
            [(0, 0, 3), (0, 3, 3), (0, 3, 4), (0, 3, 5)]
            sage: H.contract_edges([(0, 1, 1), (0, 2, 3)]); H.edges()
            [(0, 2, 2), (0, 2, 3), (0, 3, 3), (0, 3, 4), (2, 3, 5)]
        """
        # all edge specs must have the same arity (all 2-tuples or all 3-tuples)
        if len(set(len(e) for e in edges)) > 1:
            raise ValueError("edge tuples in input should have the same length")
        edge_list = []
        vertices = set()
        for e in edges:
            # try to get the vertices and label of e as distinct variables
            try:
                u, v, label = e
            except Exception:
                u, v = e
                label = None
            # non-edges are silently skipped
            if self.has_edge(u, v, label):
                edge_list.append((u, v, label))
                vertices.add(u)
                vertices.add(v)
        if not edge_list:
            return
        # implementation of union_find using DisjointSet
        from sage.sets.disjoint_set import DisjointSet
        DS = DisjointSet(self.vertex_iterator())
        for u, v, label in edge_list:
            DS.union(u, v)
        self.delete_edges(edge_list)
        edges_incident = []
        # only vertices that are not their component's representative are
        # deleted; their incident edges are collected for re-attachment
        vertices = [v for v in vertices if v != DS.find(v)]
        if self.is_directed():
            for v in vertices:
                out_edges = self.edge_boundary([v])
                edges_incident.extend(out_edges)
                edges_incident.extend(self.incoming_edge_iterator(v))
                self.delete_vertex(v)
        else:
            for v in vertices:
                edges_incident.extend(self.edges_incident(v, sort=False))
                self.delete_vertex(v)
        # re-attach collected edges to the component representatives; an edge
        # whose endpoints merged becomes a loop and is kept only if allowed
        for (u, v, label) in edges_incident:
            root_u = DS.find(u)
            root_v = DS.find(v)
            if root_v != root_u or self.allows_loops():
                self.add_edge(root_u, root_v, label)
def delete_multiedge(self, u, v):
r"""
Delete all edges from ``u`` to ``v``.
EXAMPLES::
sage: G = Graph(multiedges=True, sparse=True)
sage: G.add_edges([(0, 1), (0, 1), (0, 1), (1, 2), (2, 3)])
sage: G.edges()
[(0, 1, None), (0, 1, None), (0, 1, None), (1, 2, None), (2, 3, None)]
sage: G.delete_multiedge(0, 1)
sage: G.edges()
[(1, 2, None), (2, 3, | |
# Source: GIS-PuppetMaster/TENSILE — pycode/tinyflow/Inceptionv3_test_leo.py (0 GitHub stars)
import os
GPU = 0
# must be set before any CUDA-using library is imported so this process only
# sees the selected GPU
os.environ['CUDA_VISIBLE_DEVICES'] = f'{GPU}'
import sys
# make the repository root importable when running this file directly
sys.path.append('../../')
from pycode.tinyflow import autodiff as ad
from pycode.tinyflow.get_result import get_result
from util import *
class Inceptionv3():
    def __init__(self, num_step, batch_size, log_path, job_id):
        """ Configure an Inception-v3 job.
        Args:
            num_step: number of training steps to run
            batch_size: mini-batch size
            log_path: path used for run logs
            job_id: identifier of this job
        """
        # fixed network hyper-parameters: Inception v3 takes 299x299 RGB input
        self.dropout_rate = 0.5
        self.image_channel = 3
        self.image_size = 299
        self.num_step = num_step
        self.batch_size = batch_size
        self.job_id = job_id
        self.log_path = log_path
        # filled in later by the caller/scheduler before the job runs
        self.executor_ctx = None
        self.n_class = None
        self.ad = ad
        self.top_control_queue = None
        self.top_message_queue = None
def conv2dplusrelu(self, node, filter, model, type, stride_h, stride_w):
node_new = self.ad.convolution_2d_forward_op(node, filter, model, type, stride_h, stride_w)
node_after = self.ad.activation_forward_op(node_new, model, "relu")
return node_after
def get_predict_results(self, n_class, **kwargs):
X = self.ad.Placeholder("X")
y_ = self.ad.Placeholder("y_")
filterb_1 = self.ad.Variable("filterb_1")
filterb_2 = self.ad.Variable("filterb_2")
filterb_3 = self.ad.Variable("filterb_3")
filterb_4 = self.ad.Variable("filterb_4")
filterb_5 = self.ad.Variable("filterb_5")
filtersb_val1 = (32, 3, 3, 3)
filtersb_val2 = (32, 32, 3, 3)
filtersb_val3 = (64, 32, 3, 3)
filtersb_val4 = (80, 64, 1, 1)
filtersb_val5 = (192, 80, 3, 3)
# inception前
covb_1 = self.conv2dplusrelu(X, filterb_1, "NCHW", "VALID", 2, 2)
covb_2 = self.conv2dplusrelu(covb_1, filterb_2, "NCHW", "VALID", 1, 1)
covb_3 = self.conv2dplusrelu(covb_2, filterb_3, "NCHW", "SAME", 1, 1)
poolb = self.ad.pooling_2d_forward_op(covb_3, "NCHW", "max", 0, 0, 2, 2, 3, 3)
covb_4 = self.conv2dplusrelu(poolb, filterb_4, "NCHW", "VALID", 1, 1)
covb_5 = self.conv2dplusrelu(covb_4, filterb_5, "NCHW", "VALID", 1, 1)
covb = self.ad.pooling_2d_forward_op(covb_5, "NCHW", "max", 0, 0, 2, 2, 3, 3)
# inception_moudle1
# inception_moudle1_1
filter1_1_0 = self.ad.Variable("filter1_1_0")
filter1_1_1a = self.ad.Variable("filter1_1_1a")
filter1_1_1b = self.ad.Variable("filter1_1_1b")
filter1_1_2a = self.ad.Variable("filter1_1_2a")
filter1_1_2b = self.ad.Variable("filter1_1_2b")
filter1_1_2c = self.ad.Variable("filter1_1_2c")
filter1_1_3 = self.ad.Variable("filter1_1_3a")
filter1_1_0_val = (64, 192, 1, 1)
filter1_1_1_vala = (48, 192, 1, 1)
filter1_1_1_valb = (64, 48, 5, 5)
filter1_1_2_vala = (64, 192, 1, 1)
filter1_1_2_valb = (96, 64, 3, 3)
filter1_1_2_valc = (96, 96, 3, 3)
filter1_1_3_val = (32, 192, 1, 1)
# branch_0
incep1_1_0 = self.conv2dplusrelu(covb, filter1_1_0, "NCHW", "SAME", 1, 1)
# branch 1
incep1_1_1a = self.conv2dplusrelu(covb, filter1_1_1a, "NCHW", "SAME", 1, 1)
incep1_1_1 = self.conv2dplusrelu(incep1_1_1a, filter1_1_1b, "NCHW", "SAME", 1, 1)
# branch 2
incep1_1_2a = self.conv2dplusrelu(covb, filter1_1_2a, "NCHW", "SAME", 1, 1)
incep1_1_2b = self.conv2dplusrelu(incep1_1_2a, filter1_1_2b, "NCHW", "SAME", 1, 1)
incep1_1_2 = self.conv2dplusrelu(incep1_1_2b, filter1_1_2c, "NCHW", "SAME", 1, 1)
# branch 3
incep1_1_3a = self.ad.pooling_2d_forward_op(covb, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep1_1_3 = self.conv2dplusrelu(incep1_1_3a, filter1_1_3, "NCHW", "SAME", 1, 1)
concat1_1a = self.ad.concat_forward_op(incep1_1_0, incep1_1_1)
concat1_1b = self.ad.concat_forward_op(concat1_1a, incep1_1_2)
concat1_1 = self.ad.concat_forward_op(concat1_1b, incep1_1_3)
# inception_moudle1_2
filter1_2_0 = self.ad.Variable("filter1_2_0")
filter1_2_1a = self.ad.Variable("filter1_2_1a")
filter1_2_1b = self.ad.Variable("filter1_2_1b")
filter1_2_2a = self.ad.Variable("filter1_2_2a")
filter1_2_2b = self.ad.Variable("filter1_2_2b")
filter1_2_2c = self.ad.Variable("filter1_2_2c")
filter1_2_3 = self.ad.Variable("filter1_2_3a")
filter1_2_0_val = (64, 256, 1, 1)
filter1_2_1_vala = (48, 256, 1, 1)
filter1_2_1_valb = (64, 48, 5, 5)
filter1_2_2_vala = (64, 256, 1, 1)
filter1_2_2_valb = (96, 64, 3, 3)
filter1_2_2_valc = (96, 96, 3, 3)
filter1_2_3_val = (64, 256, 1, 1)
# branch_0
incep1_2_0 = self.conv2dplusrelu(concat1_1, filter1_2_0, "NCHW", "SAME", 1, 1)
# branch 1
incep1_2_1a = self.conv2dplusrelu(concat1_1, filter1_2_1a, "NCHW", "SAME", 1, 1)
incep1_2_1 = self.conv2dplusrelu(incep1_2_1a, filter1_2_1b, "NCHW", "SAME", 1, 1)
# branch 2
incep1_2_2a = self.conv2dplusrelu(concat1_1, filter1_2_2a, "NCHW", "SAME", 1, 1)
incep1_2_2b = self.conv2dplusrelu(incep1_2_2a, filter1_2_2b, "NCHW", "SAME", 1, 1)
incep1_2_2 = self.conv2dplusrelu(incep1_2_2b, filter1_2_2c, "NCHW", "SAME", 1, 1)
# branch 3
incep1_2_3a = self.ad.pooling_2d_forward_op(concat1_1, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep1_2_3 = self.conv2dplusrelu(incep1_2_3a, filter1_2_3, "NCHW", "SAME", 1, 1)
concat1_2a = self.ad.concat_forward_op(incep1_2_0, incep1_2_1)
concat1_2b = self.ad.concat_forward_op(concat1_2a, incep1_2_2)
concat1_2 = self.ad.concat_forward_op(concat1_2b, incep1_2_3)
# inception_moudle1_3
filter1_3_0 = self.ad.Variable("filter1_3_0")
filter1_3_1a = self.ad.Variable("filter1_3_1a")
filter1_3_1b = self.ad.Variable("filter1_3_1b")
filter1_3_2a = self.ad.Variable("filter1_3_2a")
filter1_3_2b = self.ad.Variable("filter1_3_2b")
filter1_3_2c = self.ad.Variable("filter1_3_2c")
filter1_3_3 = self.ad.Variable("filter1_3_3")
filter1_3_0_val = (64, 288, 1, 1)
filter1_3_1_vala = (48, 288, 1, 1)
filter1_3_1_valb = (64, 48, 5, 5)
filter1_3_2_vala = (64, 288, 1, 1)
filter1_3_2_valb = (96, 64, 3, 3)
filter1_3_2_valc = (96, 96, 3, 3)
filter1_3_3_val = (64, 288, 1, 1)
# branch_0
incep1_3_0 = self.conv2dplusrelu(concat1_2, filter1_3_0, "NCHW", "SAME", 1, 1)
# branch 1
incep1_3_1a = self.conv2dplusrelu(concat1_2, filter1_3_1a, "NCHW", "SAME", 1, 1)
incep1_3_1 = self.conv2dplusrelu(incep1_3_1a, filter1_3_1b, "NCHW", "SAME", 1, 1)
# branch 2
incep1_3_2a = self.conv2dplusrelu(concat1_2, filter1_3_2a, "NCHW", "SAME", 1, 1)
incep1_3_2b = self.conv2dplusrelu(incep1_3_2a, filter1_3_2b, "NCHW", "SAME", 1, 1)
incep1_3_2 = self.conv2dplusrelu(incep1_3_2b, filter1_3_2c, "NCHW", "SAME", 1, 1)
# branch 3
incep1_3_3a = self.ad.pooling_2d_forward_op(concat1_2, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep1_3_3 = self.conv2dplusrelu(incep1_3_3a, filter1_3_3, "NCHW", "SAME", 1, 1)
concat1_3a = self.ad.concat_forward_op(incep1_3_0, incep1_3_1)
concat1_3b = self.ad.concat_forward_op(concat1_3a, incep1_3_2)
concat1_3 = self.ad.concat_forward_op(concat1_3b, incep1_3_3)
#
#
#
#
# # inception_moudle2
# inception_moudle2_1
filter2_1_0 = self.ad.Variable("filter2_1_0")
filter2_1_1a = self.ad.Variable("filter2_1_1a")
filter2_1_1b = self.ad.Variable("filter2_1_1b")
filter2_1_1c = self.ad.Variable("filter2_1_1c")
filter2_1_0_val = (384, 288, 3, 3)
filter2_1_1_vala = (64, 288, 1, 1)
filter2_1_1_valb = (96, 64, 3, 3)
filter2_1_1_valc = (96, 96, 3, 3)
# branch_0
incep2_1_0 = self.conv2dplusrelu(concat1_3, filter2_1_0, "NCHW", "VALID", 2, 2)
# branch 1
incep2_1_1a = self.conv2dplusrelu(concat1_3, filter2_1_1a, "NCHW", "SAME", 1, 1)
incep2_1_1b = self.conv2dplusrelu(incep2_1_1a, filter2_1_1b, "NCHW", "SAME", 1, 1)
incep2_1_1 = self.conv2dplusrelu(incep2_1_1b, filter2_1_1c, "NCHW", "VALID", 2, 2)
# branch 2
incep2_1_2 = self.ad.pooling_2d_forward_op(concat1_3, "NCHW", "mean", 0, 0, 2, 2, 3, 3)
concat2_1a = self.ad.concat_forward_op(incep2_1_0, incep2_1_1)
concat2_1 = self.ad.concat_forward_op(concat2_1a, incep2_1_2)
# inception_moudle2_2
filter2_2_0 = self.ad.Variable("filter2_2_0")
filter2_2_1a = self.ad.Variable("filter2_2_1a")
filter2_2_1b = self.ad.Variable("filter2_2_1b")
filter2_2_1c = self.ad.Variable("filter2_2_1c")
filter2_2_2a = self.ad.Variable("filter2_2_2a")
filter2_2_2b = self.ad.Variable("filter2_2_2b")
filter2_2_2c = self.ad.Variable("filter2_2_2c")
filter2_2_2d = self.ad.Variable("filter2_2_2d")
filter2_2_2e = self.ad.Variable("filter2_2_2e")
filter2_2_3 = self.ad.Variable("filter2_2_3a")
filter2_2_0_val = (192, 768, 1, 1)
filter2_2_1_vala = (128, 768, 1, 1)
filter2_2_1_valb = (128, 128, 1, 7)
filter2_2_1_valc = (192, 128, 7, 1)
filter2_2_2_vala = (128, 768, 1, 1)
filter2_2_2_valb = (128, 128, 7, 1)
filter2_2_2_valc = (128, 128, 1, 7)
filter2_2_2_vald = (128, 128, 7, 1)
filter2_2_2_vale = (192, 128, 1, 7)
filter2_2_3_val = (192, 768, 1, 1)
# branch_0
incep2_2_0 = self.conv2dplusrelu(concat2_1, filter2_2_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_2_1a = self.conv2dplusrelu(concat2_1, filter2_2_1a, "NCHW", "SAME", 1, 1)
incep2_2_1b = self.conv2dplusrelu(incep2_2_1a, filter2_2_1b, "NCHW", "SAME", 1, 1)
incep2_2_1 = self.conv2dplusrelu(incep2_2_1b, filter2_2_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_2_2a = self.conv2dplusrelu(concat2_1, filter2_2_2a, "NCHW", "SAME", 1, 1)
incep2_2_2b = self.conv2dplusrelu(incep2_2_2a, filter2_2_2b, "NCHW", "SAME", 1, 1)
incep2_2_2c = self.conv2dplusrelu(incep2_2_2b, filter2_2_2c, "NCHW", "SAME", 1, 1)
incep2_2_2d = self.conv2dplusrelu(incep2_2_2c, filter2_2_2d, "NCHW", "SAME", 1, 1)
incep2_2_2 = self.conv2dplusrelu(incep2_2_2d, filter2_2_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_2_3a = self.ad.pooling_2d_forward_op(concat2_1, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_2_3 = self.conv2dplusrelu(incep2_2_3a, filter2_2_3, "NCHW", "SAME", 1, 1)
concat2_2a = self.ad.concat_forward_op(incep2_2_0, incep2_2_1)
concat2_2b = self.ad.concat_forward_op(concat2_2a, incep2_2_2)
concat2_2 = self.ad.concat_forward_op(concat2_2b, incep2_2_3)
# inception_moudle2_3
filter2_3_0 = self.ad.Variable("filter2_3_0")
filter2_3_1a = self.ad.Variable("filter2_3_1a")
filter2_3_1b = self.ad.Variable("filter2_3_1b")
filter2_3_1c = self.ad.Variable("filter2_3_1c")
filter2_3_2a = self.ad.Variable("filter2_3_2a")
filter2_3_2b = self.ad.Variable("filter2_3_2b")
filter2_3_2c = self.ad.Variable("filter2_3_2c")
filter2_3_2d = self.ad.Variable("filter2_3_2d")
filter2_3_2e = self.ad.Variable("filter2_3_2e")
filter2_3_3 = self.ad.Variable("filter2_3_3a")
filter2_3_0_val = (192, 768, 1, 1)
filter2_3_1_vala = (160, 768, 1, 1)
filter2_3_1_valb = (160, 160, 1, 7)
filter2_3_1_valc = (192, 160, 7, 1)
filter2_3_2_vala = (160, 768, 1, 1)
filter2_3_2_valb = (160, 160, 7, 1)
filter2_3_2_valc = (160, 160, 1, 7)
filter2_3_2_vald = (160, 160, 7, 1)
filter2_3_2_vale = (192, 160, 1, 7)
filter2_3_3_val = (192, 768, 1, 1)
# branch_0
incep2_3_0 = self.conv2dplusrelu(concat2_2, filter2_3_0, "NCHW", "SAME", 1, 1)
# branch 1
incep2_3_1a = self.conv2dplusrelu(concat2_2, filter2_3_1a, "NCHW", "SAME", 1, 1)
incep2_3_1b = self.conv2dplusrelu(incep2_3_1a, filter2_3_1b, "NCHW", "SAME", 1, 1)
incep2_3_1 = self.conv2dplusrelu(incep2_3_1b, filter2_3_1c, "NCHW", "SAME", 1, 1)
# branch 2
incep2_3_2a = self.conv2dplusrelu(concat2_2, filter2_3_2a, "NCHW", "SAME", 1, 1)
incep2_3_2b = self.conv2dplusrelu(incep2_3_2a, filter2_3_2b, "NCHW", "SAME", 1, 1)
incep2_3_2c = self.conv2dplusrelu(incep2_3_2b, filter2_3_2c, "NCHW", "SAME", 1, 1)
incep2_3_2d = self.conv2dplusrelu(incep2_3_2c, filter2_3_2d, "NCHW", "SAME", 1, 1)
incep2_3_2 = self.conv2dplusrelu(incep2_3_2d, filter2_3_2e, "NCHW", "SAME", 1, 1)
# branch 3
incep2_3_3a = self.ad.pooling_2d_forward_op(concat2_2, "NCHW", "mean", 1, 1, 1, 1, 3, 3)
incep2_3_3 = self.conv2dplusrelu(incep2_3_3a, filter2_3_3, "NCHW", "SAME", 1, 1)
concat2_3a = self.ad.concat_forward_op(incep2_3_0, incep2_3_1)
concat2_3b = self.ad.concat_forward_op(concat2_3a, incep2_3_2)
concat2_3 = self.ad.concat_forward_op(concat2_3b, incep2_3_3)
# inception_moudle2_4
filter2_4_0 = self.ad.Variable("filter2_4_0")
filter2_4_1a = self.ad.Variable("filter2_4_1a")
filter2_4_1b = self.ad.Variable("filter2_4_1b")
filter2_4_1c = self.ad.Variable("filter2_4_1c")
filter2_4_2a = self.ad.Variable("filter2_4_2a")
filter2_4_2b = self.ad.Variable("filter2_4_2b")
filter2_4_2c = self.ad.Variable("filter2_4_2c")
filter2_4_2d = self.ad.Variable("filter2_4_2d")
filter2_4_2e = self.ad.Variable("filter2_4_2e")
filter2_4_3 = self.ad.Variable("filter2_4_3a")
filter2_4_0_val = (192, 768, 1, 1)
filter2_4_1_vala = (160, 768, 1, 1)
filter2_4_1_valb = (160, 160, 1, 7)
filter2_4_1_valc = (192, 160, 7, 1)
filter2_4_2_vala = (160, 768, 1, 1)
filter2_4_2_valb = (160, 160, 7, 1)
filter2_4_2_valc = (160, 160, 1, 7)
filter2_4_2_vald = (160, 160, 7, 1)
filter2_4_2_vale = (192, 160, 1, 7)
filter2_4_3_val = (192, 768, 1, 1)
# branch_0
| |
<reponame>getzneet/transcoded-rhythm-recognition
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Multi-layer Perceptron
# By <NAME>, based on a Nicolas Rougier's program, distributed under the terms of the BSD License.
# -----------------------------------------------------------------------------
# This is an implementation of a multi-layer perceptron with backpropagation.
# The network learns from transcoded rhythm structures: it first tries to recognize the song, then tries to
# guess the music genre the song belongs to.
# -----------------------------------------------------------------------------
import numpy as np
from scipy import stats
class MLP:
    """Fully-connected multi-layer perceptron trained by error backpropagation.

    Layer sizes are fixed at construction time; the input layer carries one
    extra unit held at 1.0 that serves as the bias for the next layer.
    """

    def __init__(self, *args):
        """Build layers, zeroed weight matrices and momentum buffers for the
        given layer sizes, then draw an initial random weight set."""
        self.shape = args
        depth = len(args)
        # Input layer gets +1 unit for the bias; deeper layers are plain.
        self.layers = [np.ones(self.shape[0] + 1)]
        for k in range(1, depth):
            self.layers.append(np.ones(self.shape[k]))
        # One weight matrix between each pair of consecutive layers.
        self.weights = [
            np.zeros((self.layers[k].size, self.layers[k + 1].size))
            for k in range(depth - 1)
        ]
        # Most recent weight change per matrix, reused as the momentum term.
        self.dw = [0] * len(self.weights)
        self.reset()

    def reset(self):
        """Re-draw every weight uniformly from [-0.25, 0.25)."""
        for k in range(len(self.weights)):
            draw = np.random.random((self.layers[k].size, self.layers[k + 1].size))
            self.weights[k][...] = (2 * draw - 1) * 0.25

    def propagate_forward(self, data):
        """Feed *data* through the network and return the output layer."""
        self.layers[0][:-1] = data  # trailing bias unit stays at 1.0
        for k in range(1, len(self.shape)):
            self.layers[k][...] = self.sigmoid(
                np.dot(self.layers[k - 1], self.weights[k - 1])
            )
        return self.layers[-1]

    def propagate_backward(self, target, lrate=0.1, momentum=0.0):
        """Take one gradient step towards *target*; return the squared error."""
        error = target - self.layers[-1]
        # Collect deltas from the output layer backwards ...
        back = [error * self.dsigmoid(self.layers[-1])]
        for k in range(len(self.shape) - 2, 0, -1):
            back.append(np.dot(back[-1], self.weights[k].T) * self.dsigmoid(self.layers[k]))
        # ... then reverse so deltas[k] lines up with weights[k].
        deltas = back[::-1]
        for k in range(len(self.weights)):
            change = np.dot(np.atleast_2d(self.layers[k]).T, np.atleast_2d(deltas[k]))
            self.weights[k] += lrate * change + momentum * self.dw[k]
            self.dw[k] = change
        return np.sum(error**2)

    @staticmethod
    def sigmoid(x):
        """tanh activation, used as a sigmoid-shaped squashing function."""
        return np.tanh(x)

    @staticmethod
    def dsigmoid(x):
        """Derivative of tanh, expressed in terms of the tanh output value."""
        return 1.0 - x**2
# -----------------------------------------------------------------------------
class ChiefOperatingOfficer(object):
    # One trained network plus its bookkeeping: builds an MLP sized from the
    # dataset, trains it immediately on construction, and keeps win/fail
    # counters for the two test conditions (song and genre recognition).
    # NOTE: Python 2 code (print statements).
    def __init__(self, dataset, presentation_number, *hidden_number):
        # dataset: structured array with 'input'/'output'/'title' fields.
        # presentation_number: number of full training passes over the data.
        # *hidden_number: optional hidden-layer size; defaults to input size.
        self.scores = []
        self.scores2 = []
        self.fail = 0
        self.win = 0
        self.fail2 = 0
        self.win2 = 0
        self.dataset = dataset
        self.input_number = np.size(self.dataset[0]['input'])
        self.output_number = np.size(self.dataset[0]['output'])
        self.id = 0
        if hidden_number:
            # NOTE(review): hidden_number is the *args tuple (e.g. (2,));
            # np.ones() inside MLP accepts it as a shape, so this works —
            # confirm it is intentional rather than hidden_number[0].
            self.hidden_number = hidden_number
        else:
            self.hidden_number = self.input_number
        self.n_samples = np.size(self.dataset)
        self.presentation_number = presentation_number
        # Create the network
        self.network = MLP(self.input_number, self.hidden_number, self.output_number)
        self.ask_the_network_to_learn()
    def test1(self,sample):
        # Condition 1: does the network identify which exact track *sample* is?
        a = self.network.propagate_forward(sample)
        a2 = a.tolist()
        b = np.max([a2])
        c = a2.index(b)  # index of the strongest output unit
        c = int(c)
        # NOTE(review): reads the module-global `samples`, not self.dataset —
        # confirm this coupling is intended.
        k = samples[c]['title']
        if np.all(sample == samples[c]['input']):
            print "Answer is right ",k
            self.win += 1
        else:
            print "Answer is wrong ",k
            self.fail += 1
    def test2(self,sample):
        # Condition 2: genre recognition. Output unit 0/1/2 is interpreted as
        # rock / reggae / bossa nova respectively.
        a = self.network.propagate_forward(sample)
        a2 = a.tolist()
        b = np.max([a2])
        c = a2.index(b)
        c = int(c)
        # Reference one-hot genre outputs taken from the module-global
        # training set (rows 0, 5 and 10).
        e = samples[0]['output']
        f = samples[5]['output']
        g = samples[10]['output']
        print "-----------"
        for i in range(12):
            if np.all(sample == test[i]['input']) and np.all(test[i]['output'] == e):
                print " The tested track belongs to rock music. \n"
                if c == 0 :
                    print " The ANN thinks it's rock. \n"
                    self.win2 += 1
                else:
                    self.fail2 += 1
                    print " The ANN isn't sure about this track. \n"
            elif np.all(sample == test[i]['input']) and np.all(test[i]['output'] == f):
                print " The tested track belongs to reggae music.\n"
                if c == 1 :
                    print " The ANN thinks it's reggae. \n"
                    self.win2 += 1
                else:
                    self.fail2 += 1
                    print " The ANN isn't sure about this track. \n"
            elif np.all(sample == test[i]['input']) and np.all(test[i]['output'] == g):
                print " The tested track belongs to bossa nova music.\n"
                if c == 2 :
                    print " The ANN thinks it's bossa nova\n"
                    self.win2 += 1
                else:
                    self.fail2 += 1
                    print " The ANN isn't sure about this track. \n"
    def ask_the_network_to_learn(self):
        # Train: reset weights, then run presentation_number full passes over
        # the dataset, shuffling the presentation order on every pass.
        self.network.reset()
        # Create order with index that will be randomized
        order = np.arange(self.n_samples)
        # Each presentation, the perceptron receives all the inputs and outputs of the dataset (but in
        # a different order)
        for i in range(self.presentation_number):
            # Each presentation, use a different order
            np.random.shuffle(order)
            errors = np.ones(self.n_samples)
            for j in order:
                # NOTE(review): trains on the module-global `samples`, not
                # self.dataset — confirm intended.
                a = self.network.propagate_forward(samples['input'][j])
                errors[j] = self.network.propagate_backward(samples['output'][j])
            # NOTE(review): label says "Error mean" but the placeholder is
            # named e_min — the value shown is the mean.
            print "Presentation n°{i}. Error mean: {e_min}. Error max: {e_max}."\
            .format(i=i, e_min=np.mean(errors), e_max=np.max(errors))
# -----------------------------------------------------------------------------
class Suprachief(object):
    # Builds the whole population of 101 networks and loads the track data
    # they train on. `x` selects which data file feeds the experiment
    # (0 -> tracks2.txt only; otherwise tracks.txt plus test.txt).
    def __init__(self,x):
        self.ann = []
        self.get_tracks(x)
        self.create_networks()
    def get_tracks(self,x):
        # Populate the module-global `samples` (and, for x != 0, `test`)
        # structured arrays from comma-separated text files whose fields are:
        # input bit-string, output bit-string, title.
        # NOTE(review): the open() handles are never closed.
        if x == 0:
            array = [line.rstrip('\n') for line in open('tracks2.txt')]
            i = 0
            for line in array:
                c = line.split(',')
                # Each character of the field becomes one float (bit).
                samples[i]['input'] = [float(y) for y in c[0]]
                samples[i]['output'] = [float(y) for y in c[1]]
                samples[i]['title'] = c[2]
                tracks.append(c[2])
                i += 1
        else:
            array = [line.rstrip('\n') for line in open('tracks.txt')]
            i = 0
            for line in array:
                c = line.split(',')
                samples[i]['input'] = [float(y) for y in c[0]]
                samples[i]['output'] = [float(y) for y in c[1]]
                samples[i]['title'] = c[2]
                tracks.append(c[2])
                i += 1
            # Separate held-out set used by condition 2 (genre recognition).
            testarray = [line.rstrip('\n') for line in open('test.txt')]
            i = 0
            for line in testarray:
                c = line.split(',')
                test[i]['input'] = [float(y) for y in c[0]]
                test[i]['output'] = [float(y) for y in c[1]]
                i += 1
    def create_networks(self):
        # Four cohorts crossing network capacity (2 vs 40 hidden units) with
        # training length (1 vs 100 presentations).
        # NOTE(review): group sizes are 26/25/25/25 (101 networks total); the
        # later stats code assumes slices [0:26), [26:51), [51:76), [76:101).
        for p in range(0,26):
            self.ann.append('Ann {l}'.format(l=p))
            self.ann[p] = ChiefOperatingOfficer(samples, 1,2)
            self.ann[p].id = 'ANN {nb}'.format(nb=p)
        for p in range(26,51):
            self.ann.append('Ann {l}'.format(l=p))
            self.ann[p] = ChiefOperatingOfficer(samples,1,40)
            self.ann[p].id = 'ANN {nb}'.format(nb=p)
        for p in range(51,76):
            self.ann.append('Ann {l}'.format(l=p))
            self.ann[p] = ChiefOperatingOfficer(samples,100,2)
            self.ann[p].id = 'ANN {nb}'.format(nb=p)
        for p in range(76,101):
            self.ann.append('Ann {l}'.format(l=p))
            self.ann[p] = ChiefOperatingOfficer(samples,100,40)
            self.ann[p].id = 'ANN {nb}'.format(nb=p)
#------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    tracks = []
    # 12 training records: 32-value rhythm input, 12-unit one-hot output, title.
    samples = np.zeros(12, dtype=[('input', float, 32), ('output', float, 12),('title',str,50)])
    samples2 = np.zeros(12, dtype=[('input', float, 32), ('output', float, 12),('title',str,50)])
    print "\n"
    print "-"*10
    print "Learning "
    print "-"*10
    print "\n"
    #------------------------------------------------------------------------------------------------
    # Condition 1: song identification — every network must name the exact track.
    sc = Suprachief(0)
    scores = []
    for i in range(0,101):
        print "*-----------------------------------------------------------------*"
        print sc.ann[i].id, ':'
        # Copy inputs and shuffle the copy so presentation order is random.
        samples2['input'] = samples['input']
        # NOTE(review): `random` shadows the stdlib module name.
        random = samples2['input']
        np.random.shuffle(random)
        a = 0
        for x in random:
            a += 1
            print "Track n°{a}".format(a=a)
            sc.ann[i].test1(x)
        scores.append(sc.ann[i].win)
    #-----------------------------------------------------------------------------------------------
    # Condition 2: genre recognition on a held-out test set; outputs are now
    # 3-unit (one per genre), so the global arrays are rebuilt.
    scores2 =[]
    samples = np.zeros(12, dtype=[('input', float, 32), ('output', float, 3),('title',str,50)])
    samples2 = np.zeros(12, dtype=[('input', float, 32), ('output', float, 3),('title',str,50)])
    test = np.zeros(12, dtype=[('input', float, 32), ('output', float, 3),('title',str,50)])
    sc = Suprachief(1)
    for i in range(0,101):
        print "*-----------------------------------------------------------------*"
        print sc.ann[i].id, ':'
        # Shuffle a copy of the held-out inputs for this network.
        samples2['input'] = test['input']
        random = samples2['input']
        np.random.shuffle(random)
        a = 0
        for x in random:
            a += 1
            print "Track n°{a}".format(a=a)
            sc.ann[i].test2(x)
        scores2.append(sc.ann[i].win2)
    #------------------------------------------------------------------------------------------------
    # Stats for condition 1.
    # Kruskal-Wallis across the four cohorts (capacity x training length).
    tab = np.zeros(101, dtype=[('capacités cognitives', float, 1), ('entraînement', float, 1),('performances',float,1)])
    for i in range(0,26):
        tab[i]['capacités cognitives'] = 0
        tab[i]['entraînement'] = 0
        tab[i]['performances'] = scores[i]
    for i in range(26,51):
        tab[i]['capacités cognitives'] = 1
        tab[i]['entraînement'] = 0
        tab[i]['performances'] = scores[i]
    for i in range(51,76):
        tab[i]['capacités cognitives'] = 0
        tab[i]['entraînement'] = 1
        tab[i]['performances'] = scores[i]
    for i in range(76,101):
        tab[i]['capacités cognitives'] = 1
        tab[i]['entraînement'] = 1
        tab[i]['performances'] = scores[i]
    # NOTE(review): tab[76:100] drops index 100 — the fourth group was filled
    # through index 100, so this was likely meant to be tab[76:101].
    print stats.mstats.kruskalwallis(tab[0:26],tab[26:51],tab[51:76],tab[76:100])
    # condition 1 / Mann-Whitney U (first vs last cohort)
    # for i in range(26,51):
    # c.append(scores[i])
    #
    # for i in range(51,76):
    # v.append(scores[i])
    #
    #
    # for i in range(76,101):
    # d.append(scores[i])
    #
    j = []
    c = []
    v = []
    d = []
    for i in range(0,26):
        j.append(scores[i])
    for i in range(76,101):
        d.append(scores[i])
    # Normality, variance homogeneity, then the non-parametric comparison.
    print stats.shapiro(j)
    print stats.shapiro(d)
    print stats.levene(j,d)
    print stats.mannwhitneyu(j,d)
    # Stats for condition 2: same Kruskal-Wallis design on the genre scores.
    tab = np.zeros(101, dtype=[('capacités cognitives', float, 1), ('entraînement', float, 1),('performances',float,1)])
    for i in range(0,26):
        tab[i]['capacités cognitives'] = 0
        tab[i]['entraînement'] = 0
        tab[i]['performances'] = scores2[i]
    for i in range(26,51):
        tab[i]['capacités cognitives'] = 1
        tab[i]['entraînement'] = 0
        tab[i]['performances'] = scores2[i]
    for i in range(51,76):
        tab[i]['capacités cognitives'] = 0
        tab[i]['entraînement'] = 1
        tab[i]['performances'] = scores2[i]
    for i in range(76,101):
        tab[i]['capacités cognitives'] = 1
        tab[i]['entraînement'] = 1
        tab[i]['performances'] = scores2[i]
    # NOTE(review): same off-by-one as condition 1 — tab[76:100] vs tab[76:101].
    print stats.mstats.kruskalwallis(tab[0:26],tab[26:51],tab[51:76],tab[76:100])
    # CONDITION 2 / Mann-Whitney U (first vs last cohort)
    j = []
    c = []
    v = []
    d = []
    for i in range(0,26):
        j.append(scores2[i])
    for i in range(76,101):
        d.append(scores2[i])
    # Normality, variance homogeneity, then the non-parametric comparison.
    print stats.shapiro(j)
    print stats.shapiro(d)
    print stats.levene(j,d)
    print stats.mannwhitneyu(j,d)
# j = []
# c = []
# v = []
# d = []
#
# for i in range(0,26):
# j.append(scores2[i])
# j = np.mean(j)
#
# for i in range(26,51):
# c.append(scores2[i])
# c = np.mean(c)
#
| |
être fourni",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Ontbrekende identificatie in transactie - entry.id is verplicht",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "事务中缺少Id - 必须提供一个entry.id",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Identificador de la transacción no encontrado - se debe proporcionar un entry.id",
},
],
"display": "Missing Identifier in transaction - an entry.id must be provided",
}
)
"""
Missing Identifier in transaction - an entry.id must be provided
"""
msg_unhandled_node_type = CodeSystemConcept(
{
"code": "MSG_UNHANDLED_NODE_TYPE",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Не обработанный xml узел "%s"',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Tipo di nodo Xml non gestito "%s"',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nieobsługiwany typ węzła XML "%s"',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Type de noeud xml "%s" non traité',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Kan xml nodetype "%s" niet verwerken',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '未处理的XML节点类型"%s"',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Tipo de nodo Xml no soportado "%s"',
},
],
"display": 'Unhandled xml node type "%s"',
}
)
"""
Unhandled xml node type "%s"
"""
msg_unknown_content = CodeSystemConcept(
{
"code": "MSG_UNKNOWN_CONTENT",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Неизвестный контент (%s) в %s",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Contenuto Sconosciuto (%s) at %s",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Nieznana zawartość (%s) dla %s",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Contenu inconnu (%s) à %s",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Onbekende content (%s) at %s",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "未知内容 (%s) 位于 %s",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Contenido desconocido (%s) en %s",
},
],
"display": "Unknown Content (%s) at %s",
}
)
"""
Unknown Content (%s) at %s
"""
msg_unknown_operation = CodeSystemConcept(
{
"code": "MSG_UNKNOWN_OPERATION",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "неизвестная операция FHIR http",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "operazione http FHIR sconosciuta",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "nieznana operacja FHIR http",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "operation http FHIR inconnue",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "onbekende FHIR http operation",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "未知的FHIR HTTP操作",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Operación http FHIR desconocida",
},
],
"display": "unknown FHIR http operation",
}
)
"""
unknown FHIR http operation
"""
msg_unknown_type = CodeSystemConcept(
{
"code": "MSG_UNKNOWN_TYPE",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Тип ресурса "%s" не распознан',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Tipo di Risorsa "%s" non riconosciuto',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Nie rozpoznany typ zasobu: "%s"',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Type de ressource "%s" non reconnu',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Resourcetype "%s" niet herkend',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '资源类型"%s"未识别',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Tipo de Recurso "%s" no reconocido',
},
],
"display": 'Resource Type "%s" not recognised',
}
)
"""
Resource Type "%s" not recognised
"""
msg_updated = CodeSystemConcept(
{
"code": "MSG_UPDATED",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "существующий ресурс обновлён",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "risorsa esistente aggiornata",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "uaktualniono istniejący zasób",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "ressource existante mise à jour",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "bestaande resource updated",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "已有资源被更新",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Recurso existente actualizado",
},
],
"display": "existing resource updated",
}
)
"""
existing resource updated
"""
msg_version_aware = CodeSystemConcept(
{
"code": "MSG_VERSION_AWARE",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Для данного ресурса необходимы обновления с учётом версии",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Questa risorsa richiede aggiornamenti per versione",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Uaktualnienia zakładające wersjonowanie są wymagane dla tego zasobu",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Des mises à jour en relation avec la version sont requises pour cette ressource",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Versie-bewuste updates zijn vereist voor deze resource",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "该资源的更新必须针对版本",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Este recurso requiere actualizaciones en base a versiones",
},
],
"display": "Version aware updates are required for this resource",
}
)
"""
Version aware updates are required for this resource
"""
msg_version_aware_conflict = CodeSystemConcept(
{
"code": "MSG_VERSION_AWARE_CONFLICT",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Конфликт обновления (текущая версия сервера = "%s", указанная версия клиента = "%s")',
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Conflitto nell\'\'aggiornamento (attuale = "%s", quotato = "%s")',
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Konflikt podczas uaktualnienia (obecna wersja na serwerze = "%s", wersja wskazana przez klienta = "%s")',
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Conflit de mise à jour (version courante du serveur = "%s", version référencée du client = "%s")',
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Updateconflict (huidige serverversie = "%s", opgegeven clientversie = "%s")',
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": '更新冲突 (服务器当前版本 = "%s", 客户端引用的版本 = "%s")',
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": 'Conflicto de actualizaciones (versión actual del servidor = "%s", versión del cliente referenciada = "%s")',
},
],
"display": 'Update Conflict (server current version = "%s", client version referenced = "%s")',
}
)
"""
Update Conflict (server current version = "%s", client version referenced = "%s")
"""
msg_version_aware_url = CodeSystemConcept(
{
"code": "MSG_VERSION_AWARE_URL",
"designation": [
{
"language": "ru",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "URL для указанной версии не распознан",
},
{
"language": "it",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "URL specifico alla versione non riconosciuto",
},
{
"language": "pl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Nie rozpoznany URL specyficzny dla wersji",
},
{
"language": "fr",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "URL spécifique à une version non reconnue",
},
{
"language": "nl",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "Versie-specifieke URL niet herkend",
},
{
"language": "zh",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "未识别特定版本的URL",
},
{
"language": "es",
"use": {
"code": "display",
"system": "http://terminology.hl7.org/CodeSystem/designation-usage",
},
"value": "URL especifica de la versión no | |
<gh_stars>0
def NEXAFS_S_edge(t=0.5):
    """NEXAFS scan across the sulfur K-edge (2430-2520 eV, 91 points).

    Moves the WAXS arm out of the way, then steps the monochromator through
    the energy grid, recording one pil300KW frame per energy.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    # Park the WAXS arm before scanning.
    yield from bps.mv(waxs, 65)
    detectors = [pil300KW]
    sample_label = 'su8_ne'
    scan_energies = np.linspace(2430, 2520, 91)
    det_exposure_time(t, t)
    label_fmt = '{sample}_{energy}eV_xbpm{xbpm}'
    for scan_energy in scan_energies:
        yield from bps.mv(energy, scan_energy)
        # Let the beam settle after the mono move before reading the BPM.
        yield from bps.sleep(1)
        beam_monitor = xbpm3.sumX.value
        sample_name = label_fmt.format(sample=sample_label,
                                       energy=scan_energy,
                                       xbpm='%3.2f' % beam_monitor)
        sample_id(user_name='SR', sample_name=sample_name)
        print(f'\n\t=== Sample: {sample_name} ===\n')
        yield from bp.count(detectors, num=1)
def NEXAFS_Cl_edge(t=0.5):
    """NEXAFS scan across the Cl K-edge (2815-2850 eV, 71 points) at a fixed
    sample position, recording pil300KW and pil1M frames per energy.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    # Park the WAXS arm before scanning.
    yield from bps.mv(waxs, 65)
    dets = [pil300KW, pil1M]
    name = 'ZEP_flood_redo_ai1.5deg'
    # A coarser 2800-2850 eV grid was previously assigned here and
    # immediately overwritten; the dead assignment was removed.
    energies = np.linspace(2815, 2850, 71)
    det_exposure_time(t, t)
    name_fmt = '{sample}_{energy}eV_xbpm{xbpm}'
    for e in energies:
        yield from bps.mv(energy, e)
        # Let the beam settle after the mono move before reading the BPM.
        yield from bps.sleep(1)
        bpm = xbpm3.sumX.value
        sample_name = name_fmt.format(sample=name, energy=e, xbpm='%3.2f' % bpm)
        sample_id(user_name='SR', sample_name=sample_name)
        print(f'\n\t=== Sample: {sample_name} ===\n')
        yield from bp.count(dets, num=1)
def NEXAFS_S_edge_fine(t=0.5):
    """Fine NEXAFS scan across the S K-edge with in-plan sample alignment.

    The energy grid is dense (0.5 eV) through the white-line region
    (2475-2485 eV) and coarser on the flanks.  For each sample: move into
    position, align the incident angle at 0.4 deg with gate valve GV7 opened
    (and closed again afterwards), open attenuator att2_9, then scan at the
    aligned angle + 1.5 deg.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    yield from bps.mv(waxs, 65)
    dets = [pil300KW]
    energies = np.arange(2440, 2470, 5).tolist() + np.arange(2470, 2475, 1).tolist() + np.arange(2475, 2485, 0.5).tolist() + np.arange(2485, 2500, 1).tolist() + np.arange(2500, 2520, 5).tolist()
    # Earlier multi-sample position tables, kept for reference:
    # names = [ 'Sn', 'Sb', 'SP', 'Sn_exp', 'Sb_exp', 'SP_exp', 'SnZ', 'SbZ', 'SPZ', 'Sn_exp_Z', 'Sb_exp_Z', 'SP_exp_Z']
    # x_piezo = [48000, 38000, 28000, 20000, 12000, 1000, -8000, -16000, -22000, -30000, -38000, -48000]
    # y_piezo = [ 6500, 6500, 6500, 6500, 6500, 6500, 6900, 6900, 6900, 6900, 6900, 6900]
    # z_piezo = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    names = [ 'ZEP_flood']
    x_piezo = [ 51000]
    y_piezo = [ 7500]
    # NOTE(review): z_piezo is defined but never used in the zip below -
    # confirm whether piezo.z should also be positioned per sample.
    z_piezo = [ 0]
    ai0=1
    # (Dead locals removed: `ai = 1.5` was never read, and an initial
    # `name_fmt` assignment was overwritten inside the loop before use.)
    for name, x, y in zip(names, x_piezo, y_piezo):
        yield from bps.mv(piezo.x, x)
        yield from bps.mv(piezo.y, y)
        yield from bps.mv(piezo.th, ai0)
        det_exposure_time(t,t)
        # Open GV7 for the alignment pass; the command is issued twice with a
        # pause in between - presumably an EPICS retry, confirm.
        yield from bps.mv(GV7.open_cmd, 1)
        yield from bps.sleep(2)
        yield from bps.mv(GV7.open_cmd, 1)
        yield from alignement_gisaxs(angle = 0.4)
        yield from bps.mv(GV7.close_cmd, 1)
        yield from bps.sleep(2)
        yield from bps.mv(GV7.close_cmd, 1)
        # Open the attenuator (again commanded twice).
        yield from bps.mv(att2_9.open_cmd, 1)
        yield from bps.sleep(1)
        yield from bps.mv(att2_9.open_cmd, 1)
        yield from bps.sleep(1)
        # Measure at the aligned angle + 1.5 deg.
        ai0 = piezo.th.position
        yield from bps.mv(piezo.th, ai0 + 1.5)
        det_exposure_time(t,t)
        name_fmt = 'nexafs_2s_{sample}_{energy}eV_angle{ai}_bpm{xbpm}'
        for e in energies:
            yield from bps.mv(energy, e)
            yield from bps.sleep(1)
            bpm = xbpm3.sumX.value
            sample_name = name_fmt.format(sample=name, energy=e, ai = 1.5, xbpm = '%3.2f'%bpm)
            sample_id(user_name='SR', sample_name=sample_name)
            print(f'\n\t=== Sample: {sample_name} ===\n')
            yield from bp.count(dets, num=1)
    # Step the mono back down to the starting energy.
    yield from bps.mv(energy, 2500)
    yield from bps.mv(energy, 2470)
    yield from bps.mv(energy, 2450)
def alignement_Tiwale(t=1):
    """Align every sample in the Tiwale sample set for GISAXS.

    Moves each sample into position, runs the multi-sample alignment at
    0.25 deg, and stores the resulting incident angles and aligned piezo.y
    positions in module-level globals for later measurement plans.
    """
    global names, x_piezo, z_piezo, incident_angles, y_piezo_aligned, xs_hexa
    names= ['su8_ue', 'su8_exp', 'uv6_ue', 'uv6_exp', 'pag_0', 'pag_20', 'pag_40']
    x_piezo = [53500, 34900, 25500, 5500, -15500, -38500, -47500]
    y_piezo = [6859.975, 6900, 6900, 6900, 6900, 6900, 6900]
    # NOTE(review): z_piezo and x_hexa have only 6 entries while names has 7;
    # zip() below stops at the shortest list, so 'pag_40' is silently
    # skipped. Confirm whether a 7th entry is missing.
    z_piezo = [ 0, 0, 0, 0, 0, 0]
    x_hexa = [ 10, 10, 10, 10, 10, 0]
    incident_angles = []
    y_piezo_aligned = []
    smi = SMI_Beamline()
    # Put the beamline in alignment mode for the duration of the loop.
    yield from smi.modeAlignment(technique='gisaxs')
    for name, xs_piezo, zs_piezo, ys_piezo, xs_hexa in zip(names, x_piezo, z_piezo, y_piezo, x_hexa):
        yield from bps.mv(stage.x, xs_hexa)
        yield from bps.mv(piezo.x, xs_piezo)
        yield from bps.mv(piezo.y, ys_piezo)
        yield from bps.mv(piezo.z, zs_piezo)
        # Start each alignment from zero incident angle.
        yield from bps.mv(piezo.th, 0)
        yield from alignement_gisaxs_multisample(angle = 0.25)
        # Record the aligned angle and height for this sample.
        incident_angles = incident_angles + [piezo.th.position]
        y_piezo_aligned = y_piezo_aligned + [piezo.y.position]
    yield from smi.modeMeasurement()
    print(incident_angles)
    print(y_piezo_aligned)
def NEXAFS_S_edge_fine_multisample(t=0.5):
    """Fine NEXAFS scan across the S K-edge for every pre-aligned sample.

    Uses the hard-coded incident angles and piezo.y heights recorded by a
    previous alignment run; each sample is measured at its aligned angle
    + 0.7 deg over a grid that is dense (0.5 eV) through 2475-2485 eV.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    global names, x_piezo, z_piezo, incident_angles, y_piezo_aligned, xs_hexa
    names= ['su8_ue', 'su8_exp', 'uv6_ue', 'uv6_exp', 'pag_0', 'pag_20', 'pag_40']
    x_piezo = [53500, 37900, 25500, 8500, -15500, -38500, -47500]
    y_piezo = [ 6900, 6900, 6900, 6900, 6900, 6900, 6900]
    z_piezo = [ 0, 0, 0, 0, 0, 0, 0]
    x_hexa = [ 10, 10, 10, 10, 10, 10, 0]
    # Results of the earlier alignment pass (see alignement_Tiwale).
    incident_angles = [ 0.2122, 0.168532, -0.113152, -0.313694, 0.233214, 0.207891, 0.186071]
    y_piezo_aligned = [6859.975, 6915.767, 6901.334, 6922.385, 6992.986, 7033.101, 7140.869]
    energies = np.arange(2440, 2470, 5).tolist() + np.arange(2470, 2475, 1).tolist() + np.arange(2475, 2485, 0.5).tolist() + np.arange(2485, 2500, 1).tolist() + np.arange(2500, 2520, 5).tolist()
    yield from bps.mv(waxs, 65)
    dets = [pil300KW]
    det_exposure_time(t,t)
    name_fmt = '{sample}_{energy}eV_xbpm{xbpm}'
    # Note: the zip uses y_piezo_aligned (not y_piezo) for the height.
    for name, xs_piezo, zs_piezo, ys_piezo, xs_hexa, ais in zip(names, x_piezo, z_piezo, y_piezo_aligned, x_hexa, incident_angles):
        yield from bps.mv(stage.x, xs_hexa)
        yield from bps.mv(piezo.x, xs_piezo)
        yield from bps.mv(piezo.y, ys_piezo)
        yield from bps.mv(piezo.z, zs_piezo)
        # Measure at the aligned incident angle + 0.7 deg.
        yield from bps.mv(piezo.th, ais + 0.7)
        for e in energies:
            yield from bps.mv(energy, e)
            yield from bps.sleep(1)
            bpm = xbpm3.sumX.value
            sample_name = name_fmt.format(sample=name, energy=e, xbpm = '%3.2f'%bpm)
            sample_id(user_name='SR', sample_name=sample_name)
            print(f'\n\t=== Sample: {sample_name} ===\n')
            yield from bp.count(dets, num=1)
    # Step the mono back down to the starting energy.
    yield from bps.mv(energy, 2500)
    yield from bps.sleep(2)
    yield from bps.mv(energy, 2470)
    yield from bps.sleep(2)
    yield from bps.mv(energy, 2450)
    yield from bps.sleep(2)
def saxs_prep_multisample(t=1):
    """(G)ISAXS energy/angle series across the S K-edge for the listed
    samples at several WAXS arm positions.

    For every WAXS position and sample the plan restores the pre-aligned
    stage position, then for each energy steps the sample 300 um in x
    (fresh spot per energy) and collects at incident angles
    ais + {0.3, 0.5, 0.7, 1.0, 1.5} deg.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    dets = [pil300KW]
    energies = [2450, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2490, 2500]
    waxs_range = np.linspace(0, 39, 7)
    # Earlier multi-sample tables, kept for reference:
    # names= [ 'pag_0', 'pag_20', 'pag_40']
    # x_piezo = [ -15500, -38500, -47500]
    # y_piezo = [ 6900, 6900, 6900]
    # z_piezo = [ 0, 0, 0]
    # x_hexa = [ 7, 7, 0]
    # incident_angles = [ 0.233214, 0.207891, 0.186071]
    # y_piezo_aligned = [ 6992.986, 7033.101, 7140.869]
    names= [ 'pag_40']
    x_piezo = [ -47500]
    # NOTE(review): y_piezo is defined but the zip below uses
    # y_piezo_aligned for the height - confirm y_piezo is intentional.
    y_piezo = [ 6900]
    z_piezo = [ 0]
    x_hexa = [ -4]
    incident_angles = [ 0.186071]
    y_piezo_aligned = [ 7140.869]
    # (A duplicate det_exposure_time(t, t) call and a dead name_fmt
    # assignment were removed; the format string is set inside the loop.)
    det_exposure_time(t,t)
    for wa in waxs_range:
        yield from bps.mv(waxs, wa)
        for name, xs_piezo, zs_piezo, ys_piezo, xs_hexa, ais in zip(names, x_piezo, z_piezo, y_piezo_aligned, x_hexa, incident_angles):
            yield from bps.mv(stage.x, xs_hexa)
            yield from bps.mv(piezo.x, xs_piezo)
            yield from bps.mv(piezo.y, ys_piezo)
            yield from bps.mv(piezo.z, zs_piezo)
            yield from bps.mv(piezo.th, ais)
            for k, e in enumerate(energies):
                # Fresh spot for every energy to limit beam damage.
                yield from bps.mv(piezo.x, xs_piezo - k * 300)
                yield from bps.mv(energy, e)
                yield from bps.sleep(1)
                name_fmt = '{sample}_saxs_ai{ai}_{energy}eV_xbpm{xbpm}_wa{wa}'
                for j, aiss in enumerate([0.3, 0.5, 0.7, 1.0, 1.5]):
                    yield from bps.mv(piezo.th, ais + aiss)
                    sample_name = name_fmt.format(sample=name, ai = '%1.2f'%aiss, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
                    sample_id(user_name='OS', sample_name=sample_name)
                    print(f'\n\t=== Sample: {sample_name} ===\n')
                    yield from bp.count(dets, num=1)
    # Step the mono back down to the starting energy.
    yield from bps.mv(energy, 2470)
    yield from bps.sleep(2)
    yield from bps.mv(energy, 2450)
    yield from bps.sleep(2)
def SAXS_S_edge_fine(t=1):
    """SAXS energy series across the S K-edge on a fixed sample position.

    The grid is dense (0.5 eV) through the white-line region 2475-2485 eV
    and coarser on the flanks; one pil1M frame is recorded per energy and
    the monochromator is stepped back down to 2450 eV at the end.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    detectors = [pil1M]
    sample_label = 's3_ai0.9deg_sdd2.5m'
    edge_energies = (np.arange(2450, 2470, 5).tolist()
                     + np.arange(2470, 2475, 1).tolist()
                     + np.arange(2475, 2485, 0.5).tolist()
                     + np.arange(2485, 2500, 1).tolist()
                     + np.arange(2500, 2520, 5).tolist())
    det_exposure_time(t, t)
    label_fmt = '{sample}_{energy}eV_xbpm{xbpm}'
    for scan_energy in edge_energies:
        yield from bps.mv(energy, scan_energy)
        # Let the beam settle after the mono move before reading the BPM.
        yield from bps.sleep(1)
        beam_monitor = xbpm3.sumX.value
        sample_name = label_fmt.format(sample=sample_label,
                                       energy=scan_energy,
                                       xbpm='%3.2f' % beam_monitor)
        sample_id(user_name='SR', sample_name=sample_name)
        print(f'\n\t=== Sample: {sample_name} ===\n')
        yield from bp.count(detectors, num=1)
    # Step the mono back down to the starting energy.
    yield from bps.mv(energy, 2500)
    yield from bps.mv(energy, 2470)
    yield from bps.mv(energy, 2450)
def fly_scan_ai_nikhil(det, motor, cycle=1, cycle_t=10, phi = -0.6):
    """Fly scan: trigger one long detector exposure while sweeping `motor`
    back and forth across phi +/- 30 for `cycle` passes.

    Parameters
    ----------
    det:
        Area detector with a `cam.acquire_time` signal.
    motor:
        Motor to sweep.
    cycle: int
        Number of back-and-forth passes.
    cycle_t: float
        Seconds per pass; total exposure is cycle * cycle_t.
    phi: float
        Center of the sweep range.
    """
    start = phi - 30
    stop = phi + 30
    acq_time = cycle * cycle_t
    # Move to the sweep start, then arm a single exposure that covers the
    # whole sweep duration.
    yield from bps.mv(motor, start)
    det.stage()
    det.cam.acquire_time.put(acq_time)
    print(f'Acquire time before staging: {det.cam.acquire_time.get()}')
    st = det.trigger()
    for i in range(cycle):
        yield from list_scan([], motor, [start, stop])
    # NOTE(review): busy-wait spins the CPU until the trigger status
    # completes; consider st.wait() or sleeping in the loop - confirm the
    # status object's API before changing.
    while not st.done:
        pass
    det.unstage()
    print(f'We are done after {acq_time}s of waiting')
def SAXS_S_edge_allprs(t=1):
    """Scan the prs axis through +/-25 deg (1001 steps) around its nominal
    position at fixed energy, recording one pil1M frame per position.

    Parameters
    ----------
    t: float
        Detector exposure time in seconds.
    """
    detectors = [pil1M]
    sample_label = 's2_ai0.9deg'
    prs_center = 1.275
    det_exposure_time(t, t)
    label_fmt = '{sample}_2450eV_sdd2.5m_prs{prs}_xbpm{xbpm}'
    for prs_offset in np.linspace(-25, 25, 1001):
        yield from bps.mv(prs, prs_center + prs_offset)
        # Let the axis settle before reading the beam monitor.
        yield from bps.sleep(1)
        beam_monitor = xbpm3.sumX.value
        sample_name = label_fmt.format(sample=sample_label,
                                       prs='%3.2f' % prs_offset,
                                       xbpm='%3.2f' % beam_monitor)
        sample_id(user_name='SR', sample_name=sample_name)
        print(f'\n\t=== Sample: {sample_name} ===\n')
        yield from bp.count(detectors, num=1)
def giwaxs_S_edge_pag_2021_2(t=1):
dets = [pil300KW, pil1M]
names = ['pag0_redo', 'pag20', 'pag40']
x = [44000, 28000, 5000]
energies = [2450, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2490, 2500]
det_exposure_time(t,t)
name_fmt = '{sample}_{energy}eV_pos{posi}_wa{wa}_xbpm{xbpm}'
waxs_range = np.linspace(0, 39, 7)
ai0 = 0
for name, xs in zip(names, x):
yield from bps.mv(piezo.x, xs)
yield from bps.mv(piezo.th, ai0)
waxs_range = np.linspace(0, 26, 5)
# yield from bps.mv(GV7.open_cmd, 1 )
# yield from bps.sleep(1)
# yield from bps.mv(GV7.open_cmd, 1 )
# yield from bps.sleep(1)
yield from alignement_gisaxs(angle = 0.4)
# yield from bps.mv(GV7.close_cmd, 1 )
# yield from bps.sleep(1)
# yield from bps.mv(GV7.close_cmd, 1 )
# yield from bps.sleep(1)
ai0 = piezo.th.position
for wa in waxs_arc:
yield from bps.mv(waxs, wa)
det_exposure_time(t,t)
for k, e in enumerate(energies):
yield from bps.mv(piezo.x, xs - k * 300)
yield from bps.mv(energy, e)
yield from bps.sleep(1)
name_fmt = '{sample}_2.8m_ai{ai}_{energy}eV_xbpm{xbpm}_wa{wa}'
for j, aiss in enumerate([0.3, 0.5, 0.7, 1.5]):
yield from bps.mv(piezo.th, ai0 + aiss)
sample_name = name_fmt.format(sample=name, ai = '%1.2f'%aiss, energy=e, xbpm = '%3.1f'%xbpm3.sumY.value, wa='%2.1f'%wa)
sample_id(user_name='OS', sample_name=sample_name)
print(f'\n\t=== Sample: {sample_name} ===\n')
yield from bp.count(dets, num=1)
yield | |
# on the host, lstat won't try to follow symlinks
rstat = os.lstat(filename)
except:
rstat = os.stat(filename)
return rstat[:7] + tuple(tim + TIME_OFFSET for tim in rstat[7:])
def stat(filename):
    """Returns os.stat for a given file, adjusting the timestamps as appropriate."""
    # Local import: this function may be serialized and executed remotely,
    # so it must carry its own imports.
    import os
    rstat = os.stat(filename)
    # Shift the trailing timestamp fields by TIME_OFFSET (defined elsewhere)
    # - presumably the delta between host and board epochs; confirm where
    # TIME_OFFSET is computed.
    return rstat[:7] + tuple(tim + TIME_OFFSET for tim in rstat[7:])
def sysname():
    """Returns the os.uname().sysname field (repr'd), or 'unknown' if
    os.uname is unavailable.
    """
    try:
        import os
        return repr(os.uname().sysname)
    except:
        # Bare except is deliberate best-effort: os.uname may not exist on
        # every platform this runs on.
        return repr('unknown')
def is_visible(filename):
    """Determines if the file should be considered to be a non-hidden file.

    A name is hidden when it starts with '.' (dotfile) or ends with '~'
    (editor backup).  Assumes a non-empty name.
    """
    dotfile = filename[0] == '.'
    backup = filename[-1] == '~'
    return not (dotfile or backup)
@extra_funcs(stat)
def get_stat(filename):
    """Returns the stat array for a given file. Returns all 0's if the file
    doesn't exist.

    The extra_funcs decorator (defined elsewhere) bundles the sibling
    stat() helper so this function can run remotely.
    """
    try:
        return stat(filename)
    except OSError:
        # Missing file: return a zeroed 10-tuple instead of raising.
        return (0,) * 10
@extra_funcs(lstat)
def get_lstat(filename):
    """Returns the lstat array for a given file (symlinks not followed).
    Returns all 0's if the file doesn't exist.

    The extra_funcs decorator (defined elsewhere) bundles the sibling
    lstat() helper so this function can run remotely.
    """
    try:
        return lstat(filename)
    except OSError:
        # Missing file: return a zeroed 10-tuple instead of raising.
        return (0,) * 10
def listdir(dirname):
    """Returns a list of filenames contained in the named directory."""
    # Local import: this function may be serialized and executed remotely,
    # so it must carry its own imports.
    import os
    return os.listdir(dirname)
def listdir_matches(match):
    """Returns a list of filenames contained in the named directory.
    Only filenames which start with `match` will be returned.
    Directories will have a trailing slash.
    """
    import os
    last_slash = match.rfind('/')
    if last_slash == -1:
        # No slash: complete against the current directory.
        dirname = '.'
        match_prefix = match
        result_prefix = ''
    else:
        match_prefix = match[last_slash + 1:]
        if last_slash == 0:
            # Pattern like '/foo': complete against the root directory.
            dirname = '/'
            result_prefix = '/'
        else:
            dirname = match[0:last_slash]
            result_prefix = dirname + '/'
    def add_suffix_if_dir(filename):
        # Append '/' when the entry is a directory (0x4000 is the S_IFDIR
        # bit of the stat mode word).
        try:
            if (os.stat(filename)[0] & 0x4000) != 0:
                return filename + '/'
        except FileNotFoundError:
            # This can happen when a symlink points to a non-existant file.
            pass
        return filename
    matches = [add_suffix_if_dir(result_prefix + filename)
               for filename in os.listdir(dirname) if filename.startswith(match_prefix)]
    return matches
@extra_funcs(is_visible, lstat)
def listdir_lstat(dirname, show_hidden=True):
    """Returns a list of tuples for each file contained in the named
    directory, or None if the directory does not exist. Each tuple
    contains the filename, followed by the tuple returned by
    calling os.lstat on the filename (symlinks not followed).
    Hidden entries are filtered out unless show_hidden is True.
    """
    import os
    try:
        entries = os.listdir(dirname)
    except OSError:
        return None
    # Avoid a doubled slash when listing the root directory.
    prefix = '/' if dirname == '/' else dirname + '/'
    return [(entry, lstat(prefix + entry))
            for entry in entries
            if is_visible(entry) or show_hidden]
@extra_funcs(is_visible, stat)
def listdir_stat(dirname, show_hidden=True):
    """Returns a list of tuples for each file contained in the named
    directory, or None if the directory does not exist. Each tuple
    contains the filename, followed by the tuple returned by
    calling os.stat on the filename.
    Hidden entries are filtered out unless show_hidden is True.
    """
    import os
    try:
        entries = os.listdir(dirname)
    except OSError:
        return None
    # Avoid a doubled slash when listing the root directory.
    prefix = '/' if dirname == '/' else dirname + '/'
    return [(entry, stat(prefix + entry))
            for entry in entries
            if is_visible(entry) or show_hidden]
def make_directory(dirname):
    """Creates one or more directories.

    Returns True on success, False on any failure (already exists,
    missing parent, permissions, ...).
    """
    import os
    try:
        os.mkdir(dirname)
        return True
    except:
        # Bare except is deliberate: callers want a bool, never an exception.
        return False
def mkdir(filename):
    """Creates a directory. Returns True on success, False otherwise."""
    # auto() (defined elsewhere) presumably dispatches make_directory to the
    # host or the remote device depending on the path - confirm.
    return auto(make_directory, filename)
def remove_file(filename, recursive=False, force=False):
    """Removes a file or directory.

    Returns True on success; False on failure unless force is True, in
    which case failures are suppressed and True is returned.
    """
    import os
    try:
        mode = os.stat(filename)[0]
        # 0x4000 is the S_IFDIR bit of the stat mode word.
        if mode & 0x4000 != 0:
            # directory
            if recursive:
                for file in os.listdir(filename):
                    success = remove_file(filename + '/' + file, recursive, force)
                    if not success and not force:
                        return False
                os.rmdir(filename) # PGH Work like Unix: require recursive
            else:
                # NOTE(review): with force=True a non-recursive directory
                # removal falls through and returns True without deleting
                # anything - confirm this is intended.
                if not force:
                    return False
        else:
            os.remove(filename)
    except:
        # Bare except is deliberate: any failure is reported via the bool.
        if not force:
            return False
    return True
def rm(filename, recursive=False, force=False):
    """Removes a file or directory tree. Returns True on success."""
    # auto() (defined elsewhere) presumably dispatches remove_file to the
    # host or the remote device depending on the path - confirm.
    return auto(remove_file, filename, recursive, force)
def make_dir(dst_dir, dry_run, print_func, recursed):
    """Creates a directory. Produces information in case of dry run.
    Issues error where necessary.

    Returns True on success (a dry run always reports success), False when
    the directory could not be created.
    """
    parent = os.path.split(dst_dir.rstrip('/'))[0] # Check for nonexistent parent
    parent_files = auto(listdir_lstat, parent) if parent else True # Relative dir
    if dry_run:
        if recursed: # Assume success: parent not actually created yet
            print_func("Creating directory {}".format(dst_dir))
        elif parent_files is None:
            print_func("Unable to create {}".format(dst_dir))
        return True
    if not mkdir(dst_dir):
        print_err("Unable to create {}".format(dst_dir))
        return False
    return True
def rsync(src_dir, dst_dir, mirror, dry_run, print_func, recursed, sync_hidden):
    """Synchronizes 2 directory trees.

    Copies new files and files whose source mtime is newer from src_dir to
    dst_dir, recursing into subdirectories. With mirror=True, destination
    entries absent from the source are deleted. dry_run only reports the
    planned actions via print_func.
    """
    # This test is a hack to avoid errors when accessing /flash. When the
    # cache synchronisation issue is solved it should be removed
    if not isinstance(src_dir, str) or not len(src_dir):
        return
    sstat = auto(get_stat, src_dir)
    smode = stat_mode(sstat)
    if mode_isfile(smode):
        print_err('Source {} is a file not a directory.'.format(src_dir))
        return
    d_src = {} # Look up stat tuple from name in current directory
    src_files = auto(listdir_stat, src_dir, show_hidden=sync_hidden)
    if src_files is None:
        print_err('Source directory {} does not exist.'.format(src_dir))
        return
    # NOTE(review): the loop variable `stat` shadows the module-level stat()
    # helper within this function body.
    for name, stat in src_files:
        d_src[name] = stat
    d_dst = {}
    dst_files = auto(listdir_stat, dst_dir, show_hidden=sync_hidden)
    if dst_files is None: # Directory does not exist
        # Create the destination (or report it in a dry run); d_dst stays
        # empty so every source entry is treated as an addition.
        if not make_dir(dst_dir, dry_run, print_func, recursed):
            return
    else: # dest exists
        for name, stat in dst_files:
            d_dst[name] = stat
    set_dst = set(d_dst.keys())
    set_src = set(d_src.keys())
    to_add = set_src - set_dst # Files to copy to dest
    to_del = set_dst - set_src # To delete from dest
    to_upd = set_dst.intersection(set_src) # In both: may need updating
    for src_basename in to_add: # Name in source but absent from destination
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        print_func("Adding %s" % dst_filename)
        src_stat = d_src[src_basename]
        src_mode = stat_mode(src_stat)
        if not dry_run:
            if not mode_isdir(src_mode):
                cp(src_filename, dst_filename)
        # Recurse into new source directories (also during a dry run, so the
        # whole planned tree is reported).
        if mode_isdir(src_mode):
            rsync(src_filename, dst_filename, mirror=mirror, dry_run=dry_run,
                  print_func=print_func, recursed=True, sync_hidden=sync_hidden)
    if mirror: # May delete
        for dst_basename in to_del: # In dest but not in source
            dst_filename = dst_dir + '/' + dst_basename
            print_func("Removing %s" % dst_filename)
            if not dry_run:
                rm(dst_filename, recursive=True, force=True)
    for src_basename in to_upd: # Names are identical
        src_stat = d_src[src_basename]
        dst_stat = d_dst[src_basename]
        src_filename = src_dir + '/' + src_basename
        dst_filename = dst_dir + '/' + src_basename
        src_mode = stat_mode(src_stat)
        dst_mode = stat_mode(dst_stat)
        if mode_isdir(src_mode):
            if mode_isdir(dst_mode):
                # src and dst are both directories - recurse
                rsync(src_filename, dst_filename, mirror=mirror, dry_run=dry_run,
                      print_func=print_func, recursed=True, sync_hidden=sync_hidden)
            else:
                msg = "Source '{}' is a directory and destination " \
                      "'{}' is a file. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
        else:
            if mode_isdir(dst_mode):
                msg = "Source '{}' is a file and destination " \
                      "'{}' is a directory. Ignoring"
                print_err(msg.format(src_filename, dst_filename))
            else:
                print_func('Checking {}'.format(dst_filename))
                # Copy only when the source is strictly newer.
                if stat_mtime(src_stat) > stat_mtime(dst_stat):
                    msg = "{} is newer than {} - copying"
                    print_func(msg.format(src_filename, dst_filename))
                    if not dry_run:
                        cp(src_filename, dst_filename)
# rtc_time[0] - year 4 digit
# rtc_time[1] - month 1..12
# rtc_time[2] - day 1..31
# rtc_time[3] - weekday 1..7 1=Monday
# rtc_time[4] - hour 0..23
# rtc_time[5] - minute 0..59
# rtc_time[6] - second 0..59
# rtc_time[7] - yearday 1..366
# rtc_time[8] - isdst 0, 1, or -1
def set_time(rtc_time):
    """Sets the board's real-time clock from a 9-element time tuple
    (see the rtc_time field layout comment preceding this function).

    Tries each known port's RTC API in turn, falling through on failure;
    all errors are deliberately swallowed (best effort).
    """
    rtc = None
    try:
        # Pyboard (pyboard doesn't have machine.RTC()).
        # The pyb.RTC.datetime function takes the arguments in the order:
        # (year, month, day, weekday, hour, minute, second, subseconds)
        # http://docs.micropython.org/en/latest/library/pyb.RTC.html#pyb.RTC.datetime
        import pyb
        rtc = pyb.RTC()
        rtc.datetime(rtc_time)
    except:
        try:
            import pycom
            # PyCom's machine.RTC takes its arguments in a slightly different order
            # than the official machine.RTC.
            # (year, month, day, hour, minute, second[, microsecond[, tzinfo]])
            # https://docs.pycom.io/firmwareapi/pycom/machine/rtc/#rtc-init-datetime-none-source-rtc-internal-rc
            rtc_time2 = (rtc_time[0], rtc_time[1], rtc_time[2], rtc_time[4], rtc_time[5], rtc_time[6])
            import machine
            rtc = machine.RTC()
            rtc.init(rtc_time2)
        except:
            try:
                # The machine.RTC documentation was incorrect and doesn't agree with the code, so no link
                # is presented here. The order of the arguments is the same as the pyboard.
                import machine
                rtc = machine.RTC()
                try:
                    # ESP8266 uses rtc.datetime() rather than rtc.init()
                    rtc.datetime(rtc_time)
                except:
                    # ESP32 (at least Loboris port) uses rtc.init()
                    rtc.init(rtc_time)
            except:
                # Check for the Raspberry Pi Pico - machine.RTC doesn't exist
                try:
                    import os
                    if os.uname().sysname == 'rp2':
                        # Program the RP2040 RTC registers directly
                        # (0x4005c000 block): date in SETUP_0, time in
                        # SETUP_1, then set the LOAD bit in CTRL.
                        setup_0 = rtc_time[0] << 12 | rtc_time[1] << 8 | rtc_time[2]
                        setup_1 = (rtc_time[3] % 7) << 24 | rtc_time[4] << 16 | rtc_time[5] << 8 | rtc_time[6]
                        machine.mem32[0x4005c004] = setup_0
                        machine.mem32[0x4005c008] = setup_1
                        machine.mem32[0x4005c00c] |= 0x10
                except:
                    pass
# 0x0D's sent from the host get transformed into 0x0A's, and 0x0A sent to the
# host get converted into 0x0D0A when using sys.stdin. sys.tsin.buffer does
# no transformations, so if that's available, we use it, otherwise we need
# to use hexlify in order to get unaltered data.
def recv_file_from_host(src_file, dst_filename, filesize, dst_mode='wb'):
"""Function which runs on the | |
import xarray as _xr
import copy as _copy
import xgcm as _xgcm
import numpy as _np
import warnings as _warnings
import sys as _sys
from . import compute as _compute
from . import plot as _plot
from . import animate as _animate
from . import utils as _utils
from . subsample import _subsampleMethdos
from . compute import _computeMethdos
from . plot import _plotMethdos
from . animate import _animateMethdos
try:
import cartopy.crs as _ccrs
except ImportError:
pass
try:
from scipy import spatial as _spatial
except ImportError:
pass
try:
from dask.diagnostics import ProgressBar as _ProgressBar
except ImportError:
pass
# TODO: add more xgcm options. E.g., default boundary method.
# TODO: add attributes to new coordinates (XU, XV, ...)
# TODO: implement xgcm autogenerate in _set_coords, set_grid_coords, set_coords when released
# TODO: _create_grid will be useless with the future release of xgcm. We will pass dictionary in xgcm.Grid,
# and we can have the option of usining comodo attributes (currently cleaned up so switched off)
class OceanDataset:
"""
OceanDataset combines a xarray.Dataset with other objects used by OceanSpy (e.g., xgcm.Grid).
Additional objects are attached to the xarray.Dataset as global attributes.
OceanDataset adds, reads, and decodes dataset global attributes.
"""
    def __init__(self,
                 dataset):
        """
        Build an OceanDataset wrapping a copy of the given dataset.

        Parameters
        ----------
        dataset: xarray.Dataset
            The multi-dimensional, in memory, array database.

        Raises
        ------
        TypeError
            If `dataset` is not a xarray.Dataset.

        References
        ----------
        http://xarray.pydata.org/en/stable/generated/xarray.Dataset.html
        """
        # Check parameters
        if not isinstance(dataset, _xr.Dataset):
            raise TypeError("`dataset` must be a xarray.Dataset")
        # Initialize dataset (copy so the caller's dataset is not mutated)
        self._ds = dataset.copy()
        # Apply aliases
        # NOTE(review): rebinding the local name `self` only affects the
        # constructed instance if _apply_aliases mutates self in place and
        # returns it - confirm against _apply_aliases.
        self = self._apply_aliases()
def __copy__(self):
"""
Shallow copy
"""
return OceanDataset(dataset = self.dataset.copy())
def __deepcopy__(self):
"""
Deep copy
"""
return OceanDataset(dataset = self.dataset.copy(deep=True))
def __repr__(self):
main_info = ['<oceanspy.OceanDataset>']
main_info.append('\nMain attributes:')
if self.dataset is not None:
main_info.append(" .dataset: %s" % self.dataset.__repr__()[self.dataset.__repr__().find('<'):
self.dataset.__repr__().find('>')+1])
if self.grid is not None:
main_info.append(" .grid: %s" % self.grid.__repr__()[self.grid.__repr__().find('<'):
self.grid.__repr__().find('>')+1])
if self.projection is not None:
main_info.append(" .projection: %s" % self.projection.__repr__()[self.projection.__repr__().find('<'):
self.projection.__repr__().find('>')+1])
more_info = ['\n\nMore attributes:']
if self.name:
more_info.append(" .name: %s" % self.name)
if self.description:
more_info.append(" .description: %s" % self.description)
if self.parameters:
more_info.append(" .parameters: %s" % type(self.parameters))
if self.aliases:
more_info.append(" .aliases: %s" % type(self.aliases))
if self.grid_coords:
more_info.append(" .grid_coords: %s" % type(self.grid_coords))
if self.grid_periodic:
more_info.append(" .grid_periodic: %s" % type(self.grid_periodic))
info = '\n'.join(main_info)
if len(more_info)>1:
info = info+'\n'.join(more_info)
return info
# ==================================
# IMPORT (used by open_oceandataset)
# ==================================
    def _shift_averages(self):
        """
        Shift average variables to time_midp.
        Average variables must have attribute original_output = 'average'.

        Returns
        -------
        self, with each 'average' variable re-indexed from `time` to
        `time_midp` (first time step dropped).
        """
        for var in self._ds.data_vars:
            # pop() so the attribute survives the variable reassignment below;
            # it is restored afterwards.
            original_output = self._ds[var].attrs.pop('original_output', None)
            if original_output == 'average':
                self._ds[var] = self._ds[var].drop('time').isel(time=slice(1, None)).rename({'time': 'time_midp'})
            if original_output is not None:
                self._ds[var].attrs['original_output'] = original_output
        return self
def _set_coords(self, fillna=False, coords1Dfrom2D=False, coords2Dfrom1D=False, coordsUVfromG=False):
"""
Set dataset coordinates: dimensions + 2D horizontal coordinates.
Parameters
----------
fillna: bool
If True, fill NaNs in 2D coordinates propagating backward and forward.
coords1Dfrom2D: bool
If True, compute 1D coordinates from 2D coordinates (means).
Use with rectilinear grid only!
coords2Dfrom1D: bool
If True, compute 2D coordinates from 1D coordinates (brodacast).
coordsUVfromCG: bool
If True, compute missing coords (U and V points) from G points.
"""
# Check parameters
if not isinstance(fillna, bool):
raise TypeError('`fillna` must be bool')
if not isinstance(coords1Dfrom2D, bool):
raise TypeError('`coords1Dfrom2D` must be bool')
if not isinstance(coordsUVfromG, bool):
raise TypeError('`coordsUVfromG` must be bool')
if coords1Dfrom2D and coords2Dfrom1D:
raise TypeError('`coords1Dfrom2D` and `coords2Dfrom1D` can not be both True')
# Copy because the dataset will change
self = _copy.copy(self)
# Coordinates are dimensions only
self._ds = self._ds.reset_coords()
# Fill nans (e.g., because of exch2)
if fillna:
coords = ['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV']
dims = ['X', 'Y', 'Xp1', 'Yp1', 'Xp1', 'Y', 'X', 'Yp1']
for i, (coord, dim) in enumerate(zip(coords, dims)):
if coord in self._ds.variables:
self._ds[coord] = self._ds[coord].ffill(dim).bfill(dim).persist()
# Get U and V by rolling G
if coordsUVfromG:
for i, (point_pos, dim2roll) in enumerate(zip(['U', 'V'], ['Yp1', 'Xp1'])):
for dim in ['Y', 'X']:
coord = self._ds[dim+'G'].rolling(**{dim2roll: 2}).mean().dropna(dim2roll)
coord = coord.drop(coord.coords).rename({dim2roll: dim2roll[0]})
self._ds[dim+point_pos] = coord
if 'units' in self._ds[dim+'G'].attrs:
self._ds[dim+point_pos].attrs['units'] = self._ds[dim+'G'].attrs['units']
# For cartesian grid we can use 1D coordinates
if coords1Dfrom2D:
# Take mean
self._ds['Y'] = self._ds['YC'].mean('X', keep_attrs=True).persist()
self._ds['X'] = self._ds['XC'].mean('Y', keep_attrs=True).persist()
self._ds['Yp1'] = self._ds['YG'].mean('Xp1', keep_attrs=True).persist()
self._ds['Xp1'] = self._ds['XG'].mean('Yp1', keep_attrs=True).persist()
# Get 2D coordinates broadcasting 1D
if coords2Dfrom1D:
# Broadcast
self._ds['YC'], self._ds['XC'] = _xr.broadcast(self._ds['Y'], self._ds['X'])
self._ds['YG'], self._ds['XG'] = _xr.broadcast(self._ds['Yp1'], self._ds['Xp1'])
self._ds['YU'], self._ds['XU'] = _xr.broadcast(self._ds['Y'], self._ds['Xp1'])
self._ds['YV'], self._ds['XV'] = _xr.broadcast(self._ds['Yp1'], self._ds['X'])
# Add units
for i, (D2, D1) in enumerate(zip(['YC', 'XC', 'YG', 'XG', 'YU', 'XU', 'YV', 'XV'],
['Y', 'X', 'Yp1', 'Xp1', 'Y', 'Xp1', 'Yp1', 'X'])):
if 'units' in self._ds[D1].attrs: self._ds[D2].attrs['units'] = self._ds[D1].attrs['units']
# Set 2D coordinates
self._ds = self._ds.set_coords(['YC', 'XC',
'YG', 'XG',
'YU', 'XU',
'YV', 'XV'])
return self
def import_MITgcm_rect_nc(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with rectilinear grid and data stored in NetCDF format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(fillna=True, coords1Dfrom2D=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
def import_MITgcm_rect_bin(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with rectilinear grid and data stored in bin format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(coords2Dfrom1D=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
def import_MITgcm_curv_nc(self, shift_averages = True):
"""
Set coordinates of a dataset from a MITgcm run with curvilinear grid and data stored in NetCDF format.
Open and concatentate dataset before running this function.
Parameters
----------
shift_averages: bool
If True, shift average variable to time_midp.
Average variables must have attribute original_output = 'average'
"""
# Check parameters
if not isinstance(shift_averages, bool):
raise TypeError('`shift_averages` must be bool')
# Shift averages
if shift_averages is True:
self = self._shift_averages()
# Set coordinates
self = self._set_coords(coordsUVfromG=True)
grid_coords = {'Y' : {'Y': None, 'Yp1': 0.5},
'X' : {'X': None, 'Xp1': 0.5},
'Z' : {'Z': None, 'Zp1': 0.5, 'Zu': 0.5, 'Zl': -0.5},
'time' : {'time': -0.5}}
self = self.set_grid_coords(grid_coords = grid_coords, add_midp=True)
return self
# ===========
# ATTRIBUTES
# ===========
# -------------------
# name
# -------------------
@property
def name(self):
"""
Name of the OceanDataset
"""
name = self._read_from_global_attr('name')
return name
    @name.setter
    def name(self, name):
        """
        Inhibit setter: use set_name() so overwriting is explicit.
        """
        raise AttributeError(_setter_error_message('name'))
def set_name(self, name, overwrite=None):
"""
Set name of the OceanDataset.
Parameters
----------
name: str
Name of the OceanDataset
overwrite: bool or None
If None, raise error if name has been previously set.
If True, overwrite previous name.
If False, combine with previous name.
"""
# Check parameters
if not isinstance(name, str):
raise TypeError("`name` must be str")
# Set name
self = self._store_as_global_attr(name = 'name',
attr = name,
overwrite = overwrite)
return self
# -------------------
# description
# -------------------
@property
def description(self):
"""
Description of the OceanDataset
"""
description = self._read_from_global_attr('description')
return description
    @description.setter
    def description(self, description):
        """
        Inhibit setter: use set_description() so overwriting is explicit.
        """
        raise AttributeError(_setter_error_message('description'))
def set_description(self, | |
new_iface)
if (bad_scenario_type != bad_scenario_ack_req_session_not_set_up and
bad_scenario_type != bad_scenario_ack_resp_session_not_set_up):
if (bad_scenario_type != bad_scenario_ack_req_session_not_established_init_side and
bad_scenario_type != bad_scenario_ack_resp_session_not_established_init_side and
bad_scenario_type != bad_scenario_ack_req_session_not_established_resp_side and
bad_scenario_type != bad_scenario_ack_resp_session_not_established_resp_side):
response = "accept"
else:
response = ''
initiator.initiate_session(sid, response)
if bad_scenario_type == bad_scenario_ack_req_session_not_set_up:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_request('0')
initiator.wait_for_session_event(5)
# We want to send the unexpected frame to the side that already has
# a session created
elif bad_scenario_type == bad_scenario_ack_resp_session_not_set_up:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_response('0')
initiator.wait_for_session_event(5)
# We want to send the unexpected frame to the side that already has
# a session created
elif bad_scenario_type == bad_scenario_ack_req_session_not_established_init_side:
#fsts_id doesn't matter, no actual session exists
initiator.send_test_ack_request('0')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_req_session_not_established_resp_side:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_request('0')
initiator.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_session_not_established_init_side:
#fsts_id doesn't matter, no actual session exists
initiator.send_test_ack_response('0')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_session_not_established_resp_side:
#fsts_id doesn't matter, no actual session exists
responder.send_test_ack_response('0')
initiator.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_req_bad_fsts_id:
initiator.send_test_ack_request('-1')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_bad_fsts_id:
initiator.send_test_ack_response('-1')
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
elif bad_scenario_type == bad_scenario_ack_resp_no_ack_req:
actual_fsts_id = initiator.get_fsts_id_by_sid(sid)
initiator.send_test_ack_response(str(actual_fsts_id))
responder.wait_for_session_event(5, ["EVENT_FST_SESSION_STATE"])
else:
raise Exception("Unknown bad scenario identifier")
except Exception, e:
if e.args[0].startswith("No FST-EVENT-SESSION received"):
bad_parameter_detected = True
if not bad_parameter_detected:
# The exception was unexpected
logger.info(e)
exception_already_raised = True
raise
finally:
fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
if not exception_already_raised:
if bad_parameter_detected:
logger.info("Success. Bad scenario was handled correctly (%s)" % bad_scenario_names[bad_scenario_type])
else:
raise Exception("Failure. Bad scenario was handled incorrectly (%s)" % bad_scenario_names[bad_scenario_type])
else:
logger.info("Failure. Unexpected exception")
def test_fst_sta_connect_to_non_fst_ap(dev, apdev, test_params):
    """FST STA connecting to non-FST AP"""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # Snapshot the Multi-band IEs before the non-FST connection
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g",
                                        key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)  # give the stack time to refresh MB IEs
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # Connecting to a non-FST AP must not remove MB IEs from the STAs
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_sta_connect_to_fst_ap(dev, apdev, test_params):
    """FST STA connecting to FST AP"""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    try:
        orig_sta2_mbies = sta2.get_local_mbies()
        sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
        sta1.connect(ap1, key_mgmt="NONE",
                     scan_freq=fst_test_common.fst_test_def_freq_a)
        time.sleep(2)  # give the stack time to refresh MB IEs
        res_sta2_mbies = sta2.get_local_mbies()
        # The peer STA's MB IEs must change after the FST association
        if res_sta2_mbies == orig_sta2_mbies:
            raise Exception("Failure. MB IEs have not been updated")
    except Exception as e:  # Python 3 compatible (was "except Exception, e")
        logger.info(e)
        raise
    finally:
        sta1.disconnect()
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_connect_to_fst_sta(dev, apdev, test_params):
    """FST AP connecting to FST STA"""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    try:
        orig_ap_mbies = ap1.get_local_mbies()
        sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
        sta1.connect(ap1, key_mgmt="NONE",
                     scan_freq=fst_test_common.fst_test_def_freq_a)
        time.sleep(2)  # give the stack time to refresh MB IEs
        res_ap_mbies = ap1.get_local_mbies()
        # The AP's own MB IEs must stay unchanged after an FST STA associates
        if res_ap_mbies != orig_ap_mbies:
            raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
    except Exception as e:  # Python 3 compatible (was "except Exception, e")
        logger.info(e)
        raise
    finally:
        sta1.disconnect()
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_ap_connect_to_non_fst_sta(dev, apdev, test_params):
    """FST AP connecting to non-FST STA"""
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    try:
        orig_ap_mbies = ap2.get_local_mbies()
        dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
        fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
                                            scan_freq=fst_test_common.fst_test_def_freq_g)
        time.sleep(2)  # give the stack time to refresh MB IEs
        res_ap_mbies = ap2.get_local_mbies()
        # A non-FST STA association must not change the AP's MB IEs
        if res_ap_mbies != orig_ap_mbies:
            raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
    except Exception as e:  # Python 3 compatible (was "except Exception, e")
        logger.info(e)
        raise
    finally:
        fst_module_aux.disconnect_external_sta(dev[0], ap2)
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def test_fst_second_sta_connect_to_non_fst_ap(dev, apdev, test_params):
    """FST STA 2nd connecting to non-FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # First STA joins the FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            # Second STA joins the non-FST AP
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # MB IEs must remain present on both stations
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_second_sta_connect_to_fst_ap(dev, apdev, test_params):
    """FST STA 2nd connecting to FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # First STA joins the non-FST AP
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            # Second STA joins the FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # MB IEs must remain present on both stations
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_1_of_2_stas_from_non_fst_ap(dev, apdev, test_params):
    """FST disconnect 1 of 2 STAs from non-FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # Connect sta1 to the FST AP and sta2 to the non-FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            # Drop only the non-FST connection
            sta2.disconnect_from_external_ap()
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # MB IEs must remain present on both stations
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_1_of_2_stas_from_fst_ap(dev, apdev, test_params):
    """FST disconnect 1 of 2 STAs from FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # Connect sta1 to the FST AP and sta2 to the non-FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            # Drop only the FST connection
            sta1.disconnect()
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # MB IEs must remain present on both stations
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_2_of_2_stas_from_non_fst_ap(dev, apdev, test_params):
    """FST disconnect 2 of 2 STAs from non-FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g" })
        try:
            # Connect sta1 to the FST AP and sta2 to the non-FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            # Drop the FST connection first, the non-FST one last
            sta1.disconnect()
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            sta2.disconnect_from_external_ap()
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            # MB IEs must remain present on both stations
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs must be present on the stations")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_2_of_2_stas_from_fst_ap(dev, apdev, test_params):
    """FST disconnect 2 of 2 STAs from FST AP"""
    fst_ap1, fst_ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    with HWSimRadio() as (radio, iface):
        non_fst_ap = hostapd.add_ap(iface, { "ssid": "non_fst_11g"})
        try:
            # Connect sta1 to the FST AP and sta2 to the non-FST AP
            sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
            sta1.connect(fst_ap1, key_mgmt="NONE", scan_freq=fst_test_common.fst_test_def_freq_a)
            vals = sta2.scan()
            freq = vals['freq']
            sta2.connect_to_external_ap(non_fst_ap, ssid="non_fst_11g", key_mgmt="NONE", scan_freq=freq)
            time.sleep(2)
            # Drop the non-FST connection first, the FST one last
            sta2.disconnect_from_external_ap()
            time.sleep(2)
            orig_sta1_mbies = sta1.get_local_mbies()
            orig_sta2_mbies = sta2.get_local_mbies()
            sta1.disconnect()
            time.sleep(2)
            res_sta1_mbies = sta1.get_local_mbies()
            res_sta2_mbies = sta2.get_local_mbies()
            if (orig_sta1_mbies.startswith("FAIL") or
                orig_sta2_mbies.startswith("FAIL") or
                res_sta1_mbies.startswith("FAIL") or
                res_sta2_mbies.startswith("FAIL")):
                raise Exception("Failure. MB IEs should have stayed present on both stations")
            # Mandatory part of the IEEE 802.11 Multi-band element is
            # 24 bytes = 48 hex chars (NOTE(review): the original comment's
            # clause reference was garbled - confirm the exact clause number)
            basic_sta1_mbies = res_sta1_mbies[0:48] + res_sta1_mbies[60:108]
            basic_sta2_mbies = res_sta2_mbies[0:48] + res_sta2_mbies[60:108]
            if (basic_sta1_mbies != basic_sta2_mbies):
                raise Exception("Failure. Basic MB IEs should have become identical on both stations")
            addr_sta1_str = sta1.get_own_mac_address().replace(":", "")
            addr_sta2_str = sta2.get_own_mac_address().replace(":", "")
            # The mandatory part of the Multi-band element is followed by the
            # STA MAC Address field (6 bytes = 12 hex chars); each STA should
            # advertise the *other* STA's address there
            addr_sta1_mbie1 = res_sta1_mbies[48:60]
            addr_sta1_mbie2 = res_sta1_mbies[108:120]
            addr_sta2_mbie1 = res_sta2_mbies[48:60]
            addr_sta2_mbie2 = res_sta2_mbies[108:120]
            if (addr_sta1_mbie1 != addr_sta1_mbie2 or
                addr_sta1_mbie1 != addr_sta2_str or
                addr_sta2_mbie1 != addr_sta2_mbie2 or
                addr_sta2_mbie1 != addr_sta1_str):
                raise Exception("Failure. STA Address in MB IEs should have been same as the other STA's")
        except Exception as e:  # Python 3 compatible (was "except Exception, e")
            logger.info(e)
            raise
        finally:
            sta1.disconnect()
            sta2.disconnect_from_external_ap()
            fst_module_aux.stop_two_ap_sta_pairs(fst_ap1, fst_ap2, sta1, sta2)
            hostapd.HostapdGlobal().remove(iface)
def test_fst_disconnect_non_fst_sta(dev, apdev, test_params):
    """FST disconnect non-FST STA"""
    ap1, ap2, fst_sta1, fst_sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    # Track whether the external STA is still connected so the finally
    # clause does not try to disconnect it twice
    external_sta_connected = False
    try:
        fst_sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
        fst_sta1.connect(ap1, key_mgmt="NONE",
                         scan_freq=fst_test_common.fst_test_def_freq_a)
        dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
        fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
                                            scan_freq=fst_test_common.fst_test_def_freq_g)
        external_sta_connected = True
        time.sleep(2)
        fst_sta1.disconnect()
        time.sleep(2)
        orig_ap_mbies = ap2.get_local_mbies()
        fst_module_aux.disconnect_external_sta(dev[0], ap2)
        external_sta_connected = False
        time.sleep(2)
        res_ap_mbies = ap2.get_local_mbies()
        # Disconnecting a non-FST STA must not change the AP's MB IEs
        if res_ap_mbies != orig_ap_mbies:
            raise Exception("Failure. MB IEs have been unexpectedly updated on the AP")
    except Exception as e:  # Python 3 compatible (was "except Exception, e")
        logger.info(e)
        raise
    finally:
        fst_sta1.disconnect()
        if external_sta_connected:
            fst_module_aux.disconnect_external_sta(dev[0], ap2)
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, fst_sta1, fst_sta2)
def test_fst_disconnect_fst_sta(dev, apdev, test_params):
"""FST disconnect FST STA"""
ap1, ap2, fst_sta1, fst_sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
external_sta_connected = False
try:
vals = fst_sta1.scan(freq=fst_test_common.fst_test_def_freq_a)
fst_sta1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
vals = dev[0].scan(None, fst_test_common.fst_test_def_freq_g)
fst_module_aux.external_sta_connect(dev[0], ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
external_sta_connected = True
time.sleep(2)
fst_module_aux.disconnect_external_sta(dev[0], ap2)
external_sta_connected = False
time.sleep(2)
orig_ap_mbies = ap2.get_local_mbies()
fst_sta1.disconnect()
| |
along axis=0 (first index).
Image sequences are stored in three-dimensional arrays, in rows, columns and frames.
Not all libraries share the same sequencing, some store frames along axis=0 and
others store frames along axis=2. This function reorders an image sequence with
frames along axis=2 to an image sequence with frames along axis=0. The function
uses np.transpose(imageSequence, (2,0,1))
Args:
| imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=2
Returns:
| ((3-D np.array): reordered three-dimensional array (view or copy)
Raises:
| No exception is raised.
"""
return np.transpose(imageSequence, (2,0,1))
##############################################################################
##
def framesLast(imageSequence):
    """Reorder an image sequence from frames along axis=0 to frames along axis=2.

    Image sequences are stored in three-dimensional arrays, in rows, columns
    and frames. Not all libraries share the same sequencing: some store frames
    along axis=0 and others along axis=2. This function converts a sequence
    with frames along axis=0 (first index) to one with frames along axis=2
    (last index), equivalent to np.transpose(imageSequence, (1,2,0)).

    Args:
        | imageSequence (3-D np.array): image sequence in three-dimensional array, frames along axis=0

    Returns:
        | ((3-D np.array): reordered three-dimensional array (view or copy)

    Raises:
        | No exception is raised.
    """
    # moveaxis(a, 0, 2) produces the axis order (1, 2, 0), i.e. the same
    # view as np.transpose(imageSequence, (1, 2, 0)).
    return np.moveaxis(imageSequence, 0, 2)
##############################################################################
##
def index_coords(data, origin=None, framesFirst=True):
    """Creates (x,y) zero-based coordinate arrays for a numpy array's indices, relative to some origin.

    This function calculates two meshgrid arrays containing the coordinates of
    the input array. The origin of the new coordinate system defaults to the
    center of the image, unless the user supplies a new origin.

    The data format can be data.shape = (rows, cols, frames) or
    data.shape = (frames, rows, cols), the format of which is indicated by the
    framesFirst parameter.

    Args:
        | data (np.array): array for which coordinates must be calculated.
        | origin ( (x-orig, y-orig) ): data-coordinates of where origin should be
        | framesFirst (bool): True if data.shape is (frames, rows, cols), False if
            data.shape is (rows, cols, frames)

    Returns:
        | x (float np.array): x coordinates in array format.
        | y (float np.array): y coordinates in array format.

    Raises:
        | No exception is raised.

    original code by <NAME>
    https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
    """
    if framesFirst:
        ny, nx = data.shape[1:3]
    else:
        ny, nx = data.shape[:2]
    if origin is None:
        origin_x, origin_y = nx // 2, ny // 2
    else:
        origin_x, origin_y = origin
    x, y = np.meshgrid(np.arange(nx), np.arange(ny))
    # Use out-of-place subtraction: the meshgrid arrays have an integer dtype,
    # so the original in-place "x -= origin_x" raises a casting error in
    # modern NumPy whenever a non-integer origin is supplied.
    x = x - origin_x
    y = y - origin_y
    return x, y
##############################################################################
##
def cart2polar(x, y):
    """Converts from cartesian to polar coordinates, given (x,y) to (r,theta).

    Args:
        | x (float np.array): x values in array format.
        | y (float np.array): y values in array format.

    Returns:
        | r (float np.array): radial component for given (x,y).
        | theta (float np.array): angular component for given (x,y).

    Raises:
        | No exception is raised.

    original code by <NAME>
    https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
    """
    # radius from Pythagoras, angle from the four-quadrant arctangent
    return np.sqrt(x**2 + y**2), np.arctan2(y, x)
##############################################################################
##
def polar2cart(r, theta):
    """Converts from polar to cartesian coordinates, given (r,theta) to (x,y).

    Args:
        | r (float np.array): radial values in array format.
        | theta (float np.array): angular values in array format.

    Returns:
        | x (float np.array): x component for given (r, theta).
        | y (float np.array): y component for given (r, theta).

    Raises:
        | No exception is raised.

    original code by <NAME>
    https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
    """
    # project the radius onto the two cartesian axes
    return r * np.cos(theta), r * np.sin(theta)
##############################################################################
##
def upMu(uprightMu=True, textcomp=False):
    """Returns a LaTeX micron symbol, either an upright version or the normal symbol.

    The upright symbol requires that the siunitx LaTeX package be installed on
    the computer running the code. This function also changes the Matplotlib
    rcParams file.

    Args:
        | uprightMu (bool): signals upright (True) or regular (False) symbol (optional).
        | textcomp (bool): if True use the textcomp package, else use siunitx package (optional).

    Returns:
        | range (string): LaTeX code for the micro symbol.

    Raises:
        | No exception is raised.
    """
    if sys.version_info[0] < 3:
        # Python 2: emit LaTeX and configure matplotlib for external LaTeX.
        # NOTE(review): newer matplotlib expects text.latex.preamble as a
        # single string, not a list - verify against the matplotlib version
        # in use (this branch only runs on Python 2).
        if uprightMu:
            from matplotlib import rc, font_manager
            import matplotlib as mpl
            rc('text', usetex=True)
            # set up the use of external latex, fonts and packages
            if not textcomp :
                mpl.rcParams['text.latex.preamble'] = [
                    '\\usepackage{siunitx}', # i need upright \micro symbols, but you need...
                    '\\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
                    '\\usepackage{helvet}',  # set the normal font here
                    '\\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
                    '\\sansmath']  # <- tricky! -- gotta actually tell tex to use!
                # raw string: '\s' and '\m' are invalid escape sequences and
                # trigger SyntaxWarning (an error in future Python versions)
                upmu = r'\si{\micro}'
            else:
                mpl.rcParams['text.latex.preamble'] = [
                    '\\usepackage{textcomp}', # i need upright \micro symbols, but you need...
                    '\\usepackage{helvet}',  # set the normal font here
                    '\\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
                    '\\sansmath'  # <- tricky! -- gotta actually tell tex to use!
                    ]
                upmu = '\\textmu{}'
        else:
            upmu = '$\\mu$'
    else:
        # Python 3: return the unicode MICRO SIGN directly.
        upmu = '\u00B5'
    return upmu
##############################################################################
##
def detectFARThresholdToNoisepulseWidth(ThresholdToNoise, pulseWidth):
    """ Solve for the FAR, given the threshold to noise ratio and pulse width, for matched filter.

    References:

    "Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.

    <NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002

    Args:
        | ThresholdToNoise (float): the threshold to noise ratio.
        | pulseWidth (float): the signal pulse width in [s].

    Returns:
        | FAR (float): the false alarm rate in [alarms/s]

    Raises:
        | No exception is raised.
    """
    # Gaussian tail term over the matched-filter bandwidth factor
    numerator = np.exp(- (ThresholdToNoise ** 2) / 2.)
    denominator = 2. * pulseWidth * np.sqrt(3)
    return numerator / denominator
##############################################################################
##
def detectThresholdToNoiseTpFAR(pulseWidth, FAR):
    """ Solve for threshold to noise ratio, given pulse width and FAR, for matched filter.

    Using the theory of matched filter design, calculate the
    threshold to noise ratio, to achieve a required false alarm rate.

    References:

    "Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.

    <NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002

    Args:
        | pulseWidth (float): the signal pulse width in [s].
        | FAR (float): the false alarm rate in [alarms/s]

    Returns:
        | range (float): threshold to noise ratio

    Raises:
        | No exception is raised.
    """
    # invert FAR = exp(-T^2/2) / (2 * tau * sqrt(3)) for T
    product = 2 * pulseWidth * np.sqrt(3) * FAR
    return np.sqrt(-2 * np.log(product))
##############################################################################
##
def detectSignalToNoiseThresholdToNoisePd(ThresholdToNoise, pD):
    """ Solve for the signal to noise ratio, given the threshold to noise ratio and
    probability of detection.

    Using the theory of matched filter design, calculate the
    signal to noise ratio, to achieve a required probability of detection.

    References:

    "Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.

    <NAME>, Detection Theory: Applications and Digital Signal Processing, CRC Press, 2002

    Args:
        | ThresholdToNoise (float): the threshold to noise ratio [-]
        | pD (float): the probability of detection [-]

    Returns:
        | range (float): signal to noise ratio

    Raises:
        | No exception is raised.
    """
    from scipy.special import erfinv
    # SNR = sqrt(2) * erfinv(2*pD - 1) + TNR
    return np.sqrt(2) * erfinv(2 * pD - 1) + ThresholdToNoise
##############################################################################
##
def detectThresholdToNoiseSignalToNoisepD(SignalToNoise, pD):
    """ Solve for the threshold to noise ratio, given the signal to noise ratio and
    probability of detection.

    References:

    "Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.

    <NAME>, Detection Theory: Applications and Digital Signal Pro-cessing, CRC Press, 2002

    Args:
        | SignalToNoise (float): the signal to noise ratio [-]
        | pD (float): the probability of detection [-]

    Returns:
        | range (float): signal to noise ratio

    Raises:
        | No exception is raised.
    """
    from scipy.special import erfinv
    # TNR = SNR - sqrt(2) * erfinv(2*pD - 1)
    return SignalToNoise - np.sqrt(2) * erfinv(2 * pD - 1)
##############################################################################
##
def detectProbabilityThresholdToNoiseSignalToNoise(ThresholdToNoise, SignalToNoise):
    """ Solve for the probability of detection, given the signal to noise ratio and
    threshold to noise ratio

    References:

    "Electro-optics handbook," Tech. Rep. EOH-11, RCA, 1974. RCA Technical Series Publication.

    <NAME>, Detection Theory: Applications and Digital Signal Pro-cessing, CRC Press, 2002

    Args:
        | ThresholdToNoise (float): the threshold to noise ratio [-]
        | SignalToNoise (float): the signal to noise ratio [-]

    Returns:
        | range (float): probability of detection

    Raises:
        | No exception is raised.
    """
    from scipy.special import erf
    # pD = (erf((SNR - TNR) / sqrt(2)) + 1) / 2
    normalised = (SignalToNoise - ThresholdToNoise) / np.sqrt(2)
    return 0.5 * (erf(normalised) + 1)
##############################################################################
##
def rangeEquation(Intensity, Irradiance, rangeTab, tauTab, rangeGuess = 1, n = 2):
""" | |
- initializer : output = initializer ( )
- directory : change to this directory (if it exists)
- environment : additional environment for the job
- append_to : additional variables to be ''appended''
- prepend_to : additional variables to be ''prepended''
"""
if not merger and not collector :
import operator
merger = operator.add
self.__processor = processor
self.__merger = merger
self.__collector = collector
self.__initializer = initializer
self.__output = None
self.directory = directory
self.environment . update ( environment )
self.append_to . update ( append_to )
self.prepend_to . update ( prepend_to )
self.cleanup = cleanup
# =========================================================================
## local initialization (executed once in parent process)
    def initialize_local ( self ) :
        """Local initialization (executed once in the parent process).
        Seeds the private output accumulator from ``initializer`` (or with
        None when no initializer is configured).
        """
        # reset the accumulator so repeated runs do not carry state over
        self.__output = self.initializer () if self.initializer else None
# =========================================================================
## the actual processing of the single item
    def process ( self , jobid , *params ) :
        """The actual processing of the single item.
        Delegates to the configured ``processor`` callable, forwarding the
        job id as the first argument.
        """
        return self.processor ( jobid , *params )
# =========================================================================
## merge results
    def merge_results ( self , result , jobid = -1 ) :
        """Merge processing results into the accumulated output.
        A configured ``collector`` (which also receives the job id) takes
        precedence over a plain two-argument ``merger``.
        """
        if self.collector :
            # collector signature: ( old_output , new_result , jobid )
            self.__output = self.collector ( self.__output , result , jobid )
        else :
            # merger signature: ( old_output , new_result )
            self.__output = self.merger ( self.__output , result )
# =========================================================================
## get the final results
    def results ( self ) :
        """Get the final (merged/collected) results accumulated so far."""
        return self.__output
# =========================================================================
    @property
    def processor ( self ) :
        """``processor'' : the actual function for each subprocess
        - Signature: output = processor ( jobid , *params )
        """
        return self.__processor
    @property
    def merger ( self ) :
        """``merger'' : the actual function to merge results
        - Signature: updated_output = merger ( old_output , new_output )
        """
        return self.__merger
    @property
    def collector ( self ) :
        """``collector'' : the actual function to merge/collect results
        - Signature: updated_output = collector ( old_output , new_output , jobid )
        """
        return self.__collector
    @property
    def initializer ( self ) :
        """``initializer'' : the actual function to initialize local output
        - Signature: output = initializer()
        """
        return self.__initializer
# =============================================================================
## Simple task to execute the callable object/function
class FuncTask(Task) :
    """Simple task for parallel processing of a callable object/function.
    - func        : function
    - merger      : updated_output = merger ( old_output , new_output )
    - initializer : output = initializer ( )
    - directory   : change to this directory (if it exists)
    - environment : additional environment for the job
    - append_to   : additional variables to be ''appended''
    - prepend_to  : additional variables to be ''prepended''
    """
    def __init__ ( self ,
                   func ,
                   merger      = None  ,
                   initializer = tuple ,
                   directory   = None  ,
                   environment = {}    ,
                   append_to   = {}    ,
                   prepend_to  = {}    ,
                   cleanup     = True  ) :
        ## NOTE(review): Task.__init__ is deliberately not invoked here
        #  (matching the original); environment/append_to/prepend_to are
        #  presumably initialized by a base class - confirm.
        self.__function    = func
        self.__merger      = merger
        self.__initializer = initializer
        ## the accumulated output (the original assigned this twice)
        self.__output      = None
        self.directory     = directory
        self.environment . update ( environment )
        self.append_to   . update ( append_to   )
        self.prepend_to  . update ( prepend_to  )
        self.cleanup       = cleanup
    # =========================================================================
    ## local initialization (executed once in parent process)
    def initialize_local ( self ) :
        """Local initialization (executed once in parent process)"""
        self.__output = self.initializer () if self.initializer else None
    # =========================================================================
    ## the main processing method
    def process ( self , jobid , *params ) :
        """Invoke the wrapped function and remember its result"""
        result        = self.__function ( jobid , *params )
        self.__output = result
        return result
    # =========================================================================
    ## merge results
    def merge_results ( self , result , jobid = -1 ) :
        """Merge processing results.
        ``jobid`` is accepted (and ignored) for signature compatibility
        with ``Task.merge_results``.
        """
        if self.merger :
            self.__output = self.merger ( self.__output , result )
    # =========================================================================
    ## get the final results
    def results ( self ) :
        """Get the final (merged) results"""
        return self.__output
    @property
    def merger ( self ) :
        """``merger'' : the actual function to merge results
        - Signature: updated_output = merger ( old_output , new_output )
        """
        return self.__merger
    @property
    def initializer ( self ) :
        """``initializer'' : the actual function to initialize local output
        - Signature: output = initializer()
        """
        return self.__initializer
# =============================================================================
## @class Statistics
# helper class to collect statistics
# @author <NAME> <EMAIL>
class Statistics(object):
    """Helper class to collect per-host job execution statistics.
    Records the executing host, the wall-clock time between construction
    and ``stop``, and the number of processed jobs. Usable as a context
    manager: ``stop`` is invoked on exit.
    """
    def __init__ ( self , host = None ) :
        import time
        if host :
            self.__host = host
        else :
            # no host given: fall back to this machine's fully-qualified name
            import socket
            self.__host = socket.getfqdn ()
        self.__start = time.time ( )
        self.time    = 0.0
        self.njobs   = 0
    def stop ( self ) :
        """Record elapsed time since construction and count one more job"""
        import time
        self.time   = time.time () - self.__start
        self.njobs += 1
    def __enter__ ( self ) :
        return self
    def __exit__ ( self , *_ ) :
        self.stop ()
    @property
    def host ( self ) :
        """``host'' : the host where the job executed"""
        return self.__host
    def __repr__ ( self ) :
        return "Statistics(%s,time=%.5g,njobs=%d)" % ( self.host , self.time , self.njobs )
    __str__ = __repr__
# =============================================================================
## Simple class to merge the job execution statistics
class StatMerger(object) :
"""Simple class to merge the job execution statistics
"""
def __init__ ( self ) : self.__merged = {}
def __iadd__ ( self , stat ) :
if isinstance ( stat , StatMerger ) :
for host , se in stat :
if host in self.__merged :
ss = self.__merged [ host ]
ss += se
else :
self.__merged [ host ] = se
return self
if not stat.host in self.__merged :
self.__merged [ stat.host ] = Statistics()
se = self.__merged [ stat.host ]
se.time += stat.time
se.njobs += stat.njobs
return self
def __len__ ( self ) : return len ( self.__merged )
# =========================================================================
## iterator over merged statictics :
# @code
# merged = ...
# for host , stat in merged :
# ...
# @endcode
def __iter__ ( self ) :
"""Iterator over merged statictics :
>>> merged = ...
>>> for host , stat in merged :
...
"""
for h in self.__merged :
yield h, self.__merged [ h ]
    @property
    def merged ( self ) :
        """``merged'' : get the full merged statistic (dict: host -> Statistics)"""
        return self.__merged
# =========================================================================
## Print the job execution statistics
# @code
# merged = ...
# merged.print_stat ()
# @endcode
def print_stats ( self , prefix = '' , cputime = None ) :
"""Print job execution sstatistics
>>> merged = ...
>>> merged.print_stats ()
"""
suffix = ''
if cputime and 0 < cputime :
sumtime = 0
for host in self.__merged :
se = self.__merged[host]
sumtime += se.time
if 0 < sumtime :
h1 , r1 = divmod ( cputime , 3600 )
m1 , s1 = divmod ( r1 , 60 )
h2 , r2 = divmod ( sumtime , 3600 )
m2 , s2 = divmod ( r2 , 60 )
h1 = int ( h1 )
h2 = int ( h2 )
m1 = int ( m1 )
m2 = int ( m2 )
s1 = int ( s1 )
s2 = int ( s2 )
if h1 : suffix = ' %02d:%02d:%02ds' % ( h1 , m1 , s1 )
elif m1 : suffix = ' %02d:%02ds' % ( m1 , s1 )
else : suffix = ' %2ds' % s1
if h2 : suffix += ' vs %02d:%02d:%02ds' % ( h2 , m2 , s2 )
elif m2 : suffix += ' vs %02d:%02ds' % ( m2 , s2 )
else : suffix += ' vs %2ds' % s2
gain = float ( sumtime ) / cputime
suffix += ' (Gain: %.1f)' % gain
title = prefix + 'Job execution | |
<filename>BF_functions.py
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
from astropy.io import fits
from astropy.time import Time
from PyAstronomy import pyasl
from scipy import ndimage
import pandas as pd
import gaussfitter as gf
'''
Functions used in BF_python.py
Read the damn comments
(I'm sorry there aren't more objects)
'''
def logify_spec(isAPOGEE=False, w00=5400, n=38750, stepV=1.7, m=171):
    """Build a logarithmic (equal-velocity-step) wavelength array.

    Parameters
    ----------
    isAPOGEE : bool
        Unused here; kept for interface compatibility with the callers.
    w00 : float
        Starting wavelength of the log-wave array, in Angstroms.
    n : int
        Desired length of the log-wave vector in pixels (must be EVEN).
    stepV : float
        Velocity step of the wavelength vector, in km/s per pixel.
    m : int
        Length of the broadening function (must be ODD); returned unchanged.

    Returns
    -------
    (w1, m, r) : the log-spaced wavelength array, the BF length m, and the
        fractional wavelength step per pixel (stepV / c).

    Suggested values -- APOGEE: w00=15145, n=20000; ARCES: w00=5400,
    n=38750, stepV=1.7, m=171.
    """
    # fractional wavelength increase per pixel: dv / c (c in km/s)
    c_kms = 299792.458
    r = stepV / c_kms
    # geometric progression: w1[k] = w00 * (1 + r)**k, equally spaced in velocity
    exponents = np.arange(float(n))
    w1 = w00 * (1.0 + r) ** exponents
    print('The new log-wavelength scale will span %d - %d A with stepsize %f km/s.' % (w1[0], w1[-1], stepV))
    print(' ')
    return w1, m, r
def read_one_specfile(infile = 'myspectrum.txt', isAPOGEE = False):
    '''
    Read in a single FITS or txt spectrum file
    (Bare-bones version of read_specfiles, below)

    Parameters
    ----------
    infile : str
        Path to a spectrum file; must end in 'txt', 'fits', or 'FITS'.
    isAPOGEE : bool
        If True, treat the data as APOGEE format (flux in HDU 1 and
        wavelengths in HDU 4, both stored backwards; txt data unsorted).

    Returns
    -------
    wave, spec : wavelength and flux arrays
        (both are empty lists if the file extension is not recognized).

    Raises
    ------
    FileNotFoundError
        If the file cannot be found or opened.
    '''
    if infile[-3:] == 'txt':
        try:
            wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
            print('Text file {0}, isAPOGEE = {1}'.format(infile[-15:], isAPOGEE))
        except Exception as exc:
            raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile)) from exc
        if isAPOGEE:  # APOGEE text data may be unsorted: sort by wavelength
            order = np.argsort(wave)  # compute the permutation once, reuse for both arrays
            spec = spec[order]
            wave = wave[order]
    elif infile[-4:] == 'fits' or infile[-4:] == 'FITS':
        # Read in the FITS file with all the data in the primary HDU
        try:
            hdu = fits.open(infile)
        except Exception as exc:
            # BUGFIX: previously this branch only printed a warning and fell
            # through, ending in an UnboundLocalError at the final return;
            # fail loudly instead, matching the txt branch above.
            raise FileNotFoundError('{0} not found or cannot be opened'.format(infile)) from exc
        head = hdu[0].header
        # header date may live under either key depending on the pipeline
        try:
            datetime = head['date-obs']
        except KeyError:
            datetime = head['date']
        print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
        if isAPOGEE:
            # APOGEE: the flux lives in HDU 1 and is stored backwards
            spec = hdu[1].data.flatten()[::-1]
        else:
            # non-APOGEE (regular) option: flux in the primary HDU
            spec = hdu[0].data
        # Define the original wavelength scale
        if isAPOGEE:
            # APOGEE: read wavelength values straight from the FITS file (HDU 4, backwards)
            wave = hdu[4].data.flatten()[::-1]
        else:
            # non-APOGEE (linear): create wavelength values from header data
            headerdwave = head['cdelt1']
            headerwavestart = head['crval1']
            headerwavestop = headerwavestart + headerdwave*len(spec)
            wave = np.arange(headerwavestart, headerwavestop, headerdwave)
        if len(wave) != len(spec):  # the wave array is sometimes 1 longer than it should be
            minlength = min(len(wave), len(spec))
            wave = wave[0:minlength]
            spec = spec[0:minlength]
        try:  # check to see if we have a file with log angstroms
            logcheck = head['dispunit']
        except KeyError:
            logcheck = 'linear'  # assume linear if no 'dispunit' is in header
        if logcheck == 'log angstroms':
            wave = np.power(10, wave)  # make it linear
    else:
        print('File does not end in \'txt\' or \'fits\', no spectrum loaded.')
        wave = []; spec = []
    return wave, spec
def read_specfiles(infiles = 'infiles_BF.txt', bjdinfile = 'bjds_baryvels.txt', isAPOGEE = False):
    '''
    Read in some FITS or TXT files that are spectra and may or may not be APOGEE

    Parameters
    ----------
    infiles : str
        Path to a text file listing one spectrum file per line
        (the first is expected to be the template spectrum).
    bjdinfile : str
        Path to a text file whose second column holds a date (JD) for each
        spectrum; only consulted for plain-text spectrum files.
    isAPOGEE : bool
        If True, every listed file is treated as APOGEE format; if False,
        each filename is checked for 'apogee'/'APOGEE' to decide per file.

    Returns nspec, filenamelist, datetimelist, wavelist, speclist
    '''
    # NOTE(review): f1 is only closed on the success path; an exception in
    # the loop leaks the handle -- consider a 'with' block.
    f1 = open(infiles)
    print('Reading the files listed in %s' % infiles)
    #print('The first one had better be your template spectrum.')
    print(' ')
    speclist = []; wavelist = []
    filenamelist = []; datetimelist = []
    if isAPOGEE == False:
        checkAPOGEE = True # not all infiles are APOGEE, but let's check in case *some* are
    else:
        checkAPOGEE = False # all the infiles are APOGEE so we don't have to search
    i = 0
    for line in f1: # This loop happens once for each spectrum
        infile = line.rstrip()
        if checkAPOGEE == True: # check to see if a subset of infiles are from APOGEE or not
            if 'apogee' in infile or 'APOGEE' in infile: isAPOGEE = True
            else: isAPOGEE = False
        if infile[-4:] == 'fits' or infile[-4:] == 'FITS':
            # assume it's a FITS file
            # NOTE(review): the outer bare 'except' also swallows KeyError
            # from the nested date lookup, reporting it as a missing file.
            try:
                hdu = fits.open(infile)
                head = hdu[0].header
                filenamelist.append(infile)
                # header date may live under either key depending on the pipeline
                try: datetime = head['date-obs']
                except: datetime = head['date']
                datetimelist.append(Time(datetime, scale='utc', format='isot'))
                print('FITS file {0}, isAPOGEE = {1}, header date {2}'.format(infile[-17:], isAPOGEE, datetime))
            except:
                raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
            # it's time to dig out the spectral (flux) data and the wavelength scale!
            if isAPOGEE == True: # APOGEE: the data is in a funny place and backwards
                wave, spec = ProcessAPOGEEFITS(hdu)
            else: # not APOGEE
                spec = hdu[0].data # hope the info we want is in the zeroth HDU
                try:
                    # linear wavelength scale reconstructed from header keywords
                    headerdwave = head['cdelt1']
                    headerwavestart = head['crval1']
                    headerwavestop = headerwavestart + headerdwave*len(spec)
                    wave = np.arange(headerwavestart, headerwavestop, headerdwave)
                except:
                    raise RuntimeError('Cannot find wavelength info in FITS header')
                if len(wave) != len(spec): # the wave array is sometimes 1 longer than it should be?
                    minlength = min(len(wave), len(spec))
                    wave = wave[0:minlength]
                    spec = spec[0:minlength]
                try: # check to see if we have a file with log angstroms
                    logcheck = head['dispunit']
                except:
                    logcheck = 'linear' # hopefully, at least
                if logcheck == 'log angstroms':
                    wave = np.power(10, wave) # make it linear
                #spec = spec / np.median(spec) # WARNING really basic, possibly bad normalization
        else: # treat it like a text file
            filenamelist.append(infile)
            # dates for text spectra come from the i-th row of bjdinfile, column 1
            datetime = np.loadtxt(bjdinfile, comments='#', usecols=(1,), unpack=True)[i]
            datetimelist.append(Time(datetime, scale='utc', format='jd'))
            try:
                wave, spec = np.loadtxt(open(infile), comments='#', usecols=(0,1), unpack=True)
                print('Text file {0}, isAPOGEE = {1}, bjdinfile date {2}'.format(infile[-17:], isAPOGEE, datetime))
            except:
                raise FileNotFoundError('The file {0} was not found or cannot be opened'.format(infile))
            if isAPOGEE == True: # we need sort by wavelength, just in case it hasn't been
                spec = spec[np.argsort(wave)]
                wave = wave[np.argsort(wave)]
        # if infile[0:5] == 'trans': # you have a model telluric spectrum in nm, not A
        #     print("Assuming this is a telluric spectrum in nm, not A, proceed with caution")
        #     wave = wave*10
        # at the end of this mess, we have one file's WAVE and corresponding SPEC - save it!
        wavelist.append(wave)
        speclist.append(spec)
        i = i + 1
    # save the total number of spectra
    nspec = i
    f1.close()
    return nspec, filenamelist, datetimelist, wavelist, speclist
def ProcessAPOGEEFITS(hdu):
    '''
    Turn an APOGEE FITS HDU list into a pair of (wavelength, spectrum) ndarrays.

    The flux lives in extension 1 and the wavelength grid in extension 4;
    both are stored as 2-D arrays in reverse order, so each is flattened
    and reversed here.  The flux is also divided by its median.
    '''
    flux = hdu[1].data.flatten()[::-1]
    flux = flux / np.median(flux)  # WARNING really basic, possibly bad normalization
    wavelengths = hdu[4].data.flatten()[::-1]
    return wavelengths, flux
def gaussparty(gausspars, nspec, filenamelist, bfsmoothlist, bf_ind, amplimits, threshold, widlimits):
'''
Fits 2 or 3 gaussians to some data
'''
param = []
with open(gausspars) as f1:
for line in f1:
if line[0] != '#':
param.append( line.rstrip() )
#param = np.loadtxt(gausspars, comments='#')
bffitlist = []
bffitlist.append(0)
gauss1 = [[] for i in range(nspec)]
gauss2 = [[] for i in range(nspec)]
gauss3 = [[] for i in range(nspec)]
gauss1[0] = [0,0]
gauss2[0] = [0,0]
gauss3[0] = [0,0]
error_array = np.ones(len(bfsmoothlist[0]))*0.01 # dummy array with 0.01 error values
print(' ')
print('Gaussian fit results: peak amplitude, width, rvraw, rvraw_err')
print ('-------------------------------------------------------------')
for i in range(1, nspec):
# check to see if we are fitting a third gaussian, i.e., one near zero
# don't print out the result of this fit, but do return it for plotting
# handle comments in gausspars file without exploding
if '#' in param[i]:
commentbegin = param[i].find('#')
partest = param[i][0:commentbegin].split()
else:
partest = param[i].split()
if len(partest) == 6: ngauss = 2
elif len(partest) == 9: ngauss = 3
else: print('something is wrong with your gausspars file!')
# min and max pars for peak 1: amp, rv, width
#minpars=[0.8, float(partest[1])-threshold, 0]
#maxpars=[1.0, float(partest[1])+threshold, 7]
# | |
{ '@guid':6, ':value':'f'} ] } ]
# at every stage, if we see a dict we take only the first result
# (we print a complaint if we see another)
# if we see a list we put all the results into that list
if isinstance(query, dict):
if len(result) == 0:
return None
elif len(result) > 1:
guids = [('#' + x[0]) for x in result]
raise MQLResultError(
query,
'Expected one result, got %(count)d',
count=len(result),
guids=guids)
else:
return query.node.create_results(result[0], ResultDict(), mode)
elif isinstance(query, list):
resultv = []
for res in result:
resultv.append(query[0].node.create_results(res, ResultDict(), mode))
return resultv
else:
raise MQLInternalError(query, 'Query not dict or list')
def sanitize_value(self, value, datatype, varenv):
    # Sanitize a single result value before returning it to the client.
    # Only runs when the varenv requests escaping (default 'html') and the
    # value is a string.  For datatype 'url': values beginning with
    # 'javascript:' are defanged with an 'unsafe-' prefix, everything else
    # is HTML-escaped via cgi.escape.
    # NOTE(review): as indented, non-'url' datatypes pass through unescaped
    # -- verify against the authoritative copy in env.py unquote_value().
    # NOTE(review): cgi.escape was removed in Python 3.8; this code targets
    # an older interpreter.
    # this code is a copy of the code in env.py unquote_value()
    # which should be considered authorative.
    if varenv.get('escape', 'html') and isinstance(value, str):
      if datatype == 'url':
        if value.find('javascript:') == 0:
          value = 'unsafe-' + value
        else:
          value = cgi.escape(value)
    return value
def filter_query_result(self, result, varenv):
    """
    make the result look like the query.

    note that the result is already structurally isomorphic to
    the query; this just converts guids and removes extra
    fields.

    Recurses over lists and dicts; each dict result is matched key-by-key
    against its originating query (result.query.original_query) and only
    the keys the caller asked for are copied into the filtered output.
    Raises MQLInternalError on any structural mismatch.
    """
    if isinstance(result, list):
      filter_result = []
      for elem in result:
        # need this pointer to get index results properly sorted.
        elem.list = result
        filter_result.append(self.filter_query_result(elem, varenv))
    elif isinstance(result, dict):
      filter_result = {}
      # NOTE(review): iteritems() is Python 2 only.
      for key, asked in result.query.original_query.iteritems():
        # keys prefixed '@' or ':' are primitive-level fields; strip the
        # prefix to dispatch on the base name.
        if key[0] in '@:':
          basekey = key[1:]
          if basekey == 'id':
            filter_result[key] = asked
            # horrible hack to collect up the guids we care about...
            if asked is None:
              varenv.guid_list.append(result[key[0] + 'guid'])
          elif (basekey in QueryPrimitive.directives or
                basekey in QueryPrimitive.special):
            # should we output these?
            filter_result[key] = asked
          elif key[0] == '@' and result.query.get(
              '@optional') and key not in result:
            # XXX here we actually will give you an empty result
            # we could give you nothing at all
            filter_result[key] = None
          elif basekey == 'guid':
            filter_result[key] = result[key]
          elif basekey == 'value':
            # sanitize results.
            filter_result[key] = self.sanitize_value(
                result[key], result[key[0] + 'datatype'], varenv)
          elif basekey in QueryPrimitive.values:
            # this better be what you said!!!
            filter_result[key] = result[key]
          elif basekey == 'index':
            # ordering info: computed lazily across the containing list
            filter_result[key] = self.generate_index_read_result(result)
          elif basekey in QueryPrimitive.pointers:
            # might be direct sub-query or constraint, or query
            if isinstance(asked, dict):
              # sub-query, return it
              filter_result[key] = self.filter_query_result(result[key], varenv)
            else:
              if asked is None:
                # we'll be asking for the id of this thing, not just the guid.
                varenv.lookup_manager.guid_list.append(result[key])
              # just give back the guid
              filter_result[key] = result[key]
        elif valid_relname(key):
          # a user-level relation name: recurse into the nested result
          # skip optional results we didn't get a value for.
          if result.query.get('@optional') and key not in result:
            # XXX should we give you None as a result rather than leaving it out completely?
            pass
          else:
            # is this a ResultError or an InternalError?
            if key not in result:
              raise MQLInternalError(
                  result.query, "No return result for '%(key)s'", key=key)
            else:
              filter_result[key] = self.filter_query_result(result[key], varenv)
        elif key[0] == '?':
          # it's possible that we didn't find any order information, so give back null in that case
          filter_result[key] = result.get(key, None)
        else:
          raise MQLInternalError(
              result.query,
              "Didn't expect to see %(key)s in original query while filtering",
              key=key)
      # remember the filtered form on the result node itself
      result.filter = filter_result
    elif result is None:
      # there's no result here even though we expected one.
      filter_result = result
    else:
      raise MQLInternalError(
          result.query, "Didn't understand result", result=result)
    return filter_result
def generate_index_read_result(self, result):
    # Compute (on first demand) the ':index' ordering for every element of
    # the list containing `result`, then return this element's index.
    # Elements carrying a '?value' ordering key are ranked by sorting those
    # values; elements without one get an index of None.
    # bit of gymnastics to get the containing list of the result item we are passed
    if ':index' not in result:
      # must be the first element -- compute all the indexes.
      values = {}
      for elem in result.list:
        if '?value' in elem:
          if elem['?value'] not in values:
            values[elem['?value']] = elem
          else:
            # two elements sharing an ordering value would make the sort ambiguous
            raise MQLInternalError(
                result.query,
                'Duplicate ordering value found in list',
                value=elem['?value'])
        else:
          elem[':index'] = None
      # assign 0..n-1 in sorted order of the '?value' keys
      # NOTE(review): iterkeys() is Python 2 only.
      i = 0
      for value in sorted(values.iterkeys()):
        values[value][':index'] = i
        i += 1
    return result[':index']
def dispatch_prepares(self, query, varenv):
    # Run the 'prepare' phase for a query tree: a dict is prepared via its
    # node with a graph-reader callback; a list dispatches each sub-query
    # in turn.
    if isinstance(query, dict):
      # eek
      def reader(graphq):
        # callback handed to run_prepare: execute graphq against the graph
        # and return the raw result (logged before and after).
        dumplog('PREPARE', graphq)
        try:
          gresult = self.gc.read_varenv(graphq, varenv)
        except EmptyResult:
          # debug ME-907
          LOG.exception(
              'mql.lojson.LowQuery.dispatch_prepares()',
              graphq=graphq,
              varenv=varenv)
          # there's an implicit unescapable optional-ness at the root of every query
          gresult = []
        dumplog('PREPARE_RESULT', gresult)
        return gresult
      query.node.run_prepare(reader)
    elif isinstance(query, list):
      for subq in query:
        self.dispatch_prepares(subq, varenv)
#
# This code is too sloppy for security critical code -- I can't easily convince myself that
# all paths into generate_write_query() must necessarily go through this code.
# Probably we need to move this closer to generate_write_query() (which is not that
# easy to verify even without the write access issues.
#
def check_write_access(self, head_query, varenv):
# we have a prepared set of queries with attached results. We now check that you (the user) can create the
# appropriate nodes and links. All the nodes will get the permission specified in varenv['$permission'].
# all the links will get the permission of their left (if you are authorized in that permission)
# if you are not authorized, then the write will fail with "unauthorized".
scope.check_write_throttle(self, varenv)
for query in dict_recurse(head_query):
if query.key == '+has_permission':
continue
link = query.link
node = query.node
ordered = query.ordered
# first do the node -- the link decision may depend on the
# node decision (it is either the parent this node)
if node is not Missing:
if node.left not in (None, Missing):
raise MQLInternalError(
query,
"Found node with non-empty left -- AccessControl can't handle that!"
)
# make sure this isn't a side door into has_permission
# to cause trouble later.
if node.typeguid == self.lookup_boot_guid('/boot/has_permission',
varenv):
raise MQLInternalError(
query,
"Can't reference has_permission during a write regardless of how hard you try"
)
if node.state == 'create':
if node.guid is not None:
raise MQLInternalError(
query,
"Can't create a node which already has a guid",
guid=node.guid)
# now we need to find the has_permission contents
# and change the guid...
permissionguid = query['+has_permission'].node.guid
if permissionguid != varenv.default_permission_guid:
raise MQLInternalError(
query,
'Creating a node with something other than the default permission'
)
if scope.check_permission(
self, varenv, permissionguid=permissionguid):
# we'll need to create a link for this node.
query['+has_permission'].link.scope = varenv.attribution_guid
query['+has_permission'].link.access_control_ok = True
node.scope = varenv.attribution_guid
node.access_control_ok = True
else:
# *****************************************************************************************************************
raise MQLAccessError(
query,
'User %(user)s cannot create with permission %(permission)s',
user=varenv.get_user_id(),
permission=permissionguid)
# *****************************************************************************************************************
elif node.state == 'remove':
if node.guid in (None, Missing):
raise MQLInternalError(
query, 'Found node with guid=null but state=remove')
if node.scope in (None, Missing):
raise MQLInternalError(
query,
'Found node with guid %(guid)s but scope=null during check_write_access()',
guid=node.guid)
node_permission = query['+has_permission'].node.guid
if scope.check_permission(
self, varenv, permissionguid=node_permission):
node.scope = varenv.attribution_guid
node.access_control_ok = True
# we'll need to remove the permission link for this node.
query['+has_permission'].link.scope = varenv.attribution_guid
query['+has_permission'].link.access_control_ok = True
else:
# *****************************************************************************************************************
raise MQLAccessError(
query,
'User %(user)s does not have permission to destroy here',
user=varenv.get_user_id())
# *****************************************************************************************************************
elif node.state == 'found':
# set access_control_ok if it would be OK to write this node (even though we are not doing so)
node_permission = query['+has_permission'].node.guid
if scope.check_permission(
self, varenv, permissionguid=node_permission):
# node.scope is unchanged as we are not actually going to do the write...
node.access_control_ok = True
elif node.state == 'notpresent':
# not ok to write a link pointing to this missing node
pass
else:
raise MQLInternalError(
query,
'Found node with state %(state)s in check_write_access()',
state=node.state)
# now the link. link.left is either node, or link.parent (which we have already processed)
if link is not Missing:
if link.state in ('create', 'modify', 'remove'):
if link.left in (None, Missing):
raise MQLInternalError(
query,
"Found link with empty left -- AccessControl can't handle that!"
)
if link.state in ('create', 'modify') and link.guid:
raise MQLInternalError(
query,
'Found link with guid which we are trying to create',
guid=link.guid)
elif link.state == 'remove' and not link.guid:
raise MQLInternalError(
query, 'Found link without guid which we are trying to remove')
# link.left should have already been found, but may not have been processed
# XXX (this is probably a bad order - we should always process nodes before the links |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.