input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
select from available object types",
"Select type for object array", bG.objectArrayChoices)
if choice != "":
self.Save()
bG.insertObjectArray(p.element, p.graph, choice)
elif id == const.ID_InsertMenuDataDelayOfSelected:
self.Save()
bG.makeDelay(Xref.get(obj).elem, p.graph, 3)
elif id == const.ID_InsertMenuDataObjectArrayOfSelected:
self.Save()
bG.makeObjectArray(Xref.get(obj).elem, p.graph, 3)
elif id == const.ID_InsertMenuDataPyramidOfSelected:
self.Save()
bG.makePyramid(Xref.get(obj).elem, p.graph, 4)
elif id == const.ID_InsertMenuDataObjectArrayFromTensor:
pass
elif id in {const.ID_InsertMenuDataROI, const.ID_InsertMenuDataChannel}:
self.Save()
bG.insertObject(Xref.get(obj).elem, p.graph, idToTagMap[id])
elif id == const.ID_InsertMenuDataView:
pass
else:
tag = idToTagMap[id]
if tag is not None:
self.Save()
bG.insertObject(p.element, p.graph, tag)
self.Refresh()
class DrawPanel(wx.ScrolledWindow):
"""
Class to implement a drawing area for a graph on a tab
Member variables:
"""
def __init__(self, parent, topframe, elemGraph):
"""
Initialise DrawPanel object, binds mouse, paint, and key_down events
Notebook parent - the owning window, in this case
(design case) the wx.notebook to which
this has been added as a page
cvxMain.MainFrame topframe - the application main frame;gives
access to various objects required by
this window
tab Graph - supplied if a pre-existing graph is
to be shown
"""
wx.ScrolledWindow.__init__(self, parent, -1, style=wx.HSCROLL|wx.VSCROLL|wx.SUNKEN_BORDER)
self.topframe = topframe
self.parent = parent
self.SetBackgroundColour("WHITE")
self.SetScrollRate(20, 20)
self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
self.Bind(wx.EVT_PAINT, self.OnPaintTab)
self.InitPos()
self.Update(elemGraph)
# Set up a dictionary for key event handling for this window
self.key_table = {(wx.ACCEL_NORMAL,
wx.WXK_RIGHT): self.MoveRight,
(wx.ACCEL_NORMAL,
wx.WXK_LEFT): self.MoveLeft,
(wx.ACCEL_NORMAL,
wx.WXK_DOWN): self.MoveDown,
(wx.ACCEL_NORMAL,
wx.WXK_UP): self.MoveUp,
(wx.ACCEL_SHIFT,
wx.WXK_RIGHT): lambda: self.MoveRight(False),
(wx.ACCEL_SHIFT,
wx.WXK_LEFT): lambda: self.MoveLeft(False),
(wx.ACCEL_SHIFT,
wx.WXK_DOWN): lambda: self.MoveDown(False),
(wx.ACCEL_SHIFT,
wx.WXK_UP): lambda: self.MoveUp(False),
(wx.ACCEL_NORMAL,
wx.WXK_PAGEDOWN): lambda: self.ScrollSome(0, 10),
(wx.ACCEL_NORMAL,
wx.WXK_PAGEUP): lambda: self.ScrollSome(0, -10),
(wx.ACCEL_SHIFT,
wx.WXK_PAGEDOWN): lambda: self.ScrollSome(10, 0),
(wx.ACCEL_SHIFT,
wx.WXK_PAGEUP): lambda: self.ScrollSome(-10, 0),
(wx.ACCEL_NORMAL,
wx.WXK_ESCAPE): self.SelectNone,
(wx.ACCEL_NORMAL,
wx.WXK_INSERT): self.insertNodeMenu,
(wx.ACCEL_SHIFT,
wx.WXK_INSERT): lambda: self.insertNodeMenu(True),
(wx.ACCEL_CTRL,
wx.WXK_INSERT): self.insertDataMenu,
(wx.ACCEL_NORMAL,
wx.WXK_MENU): lambda: self.topframe.properties.SetFocus()
}
# Bind KEY_DOWN
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
# Event handlers
    def OnPaintTab(self, evt):
        """
        Called to update the visible area of the tab.
        Rebuilds/redraws the cached bitmap if the cross-reference marks it
        dirty, then blits the visible portion and overlays the current
        selection, drag line, and drop-target markers.
        wx.Event evt
        """
        if Xref.isDirty():
            # We need to rebuild the graphs
            self.parent.BuildGraphs()
        if Xref.isDirty(self.graph):
            # We need to redraw
            self.Redraw()
        self.updateMenus()
        self.updateProperties()
        x, y = self.bitmap.GetSize()
        scale = self.parent.zoom
        # Virtual size grows with zoom so scrollbars track the scaled bitmap.
        self.SetVirtualSize(x * scale, y * scale)
        dc = wx.PaintDC(self)
        self.PrepareDC(dc)
        dc.Clear()
        dc.SetUserScale(scale, scale)
        # Clip the blit to the intersection of the bitmap and the visible area.
        vr = self.getVisibleRect()
        br = wx.Rect(0, 0, x, y)
        br = br.Intersect(vr)
        if br.width > 0 and br.height > 0:
            dc.DrawBitmap(self.bitmap.GetSubBitmap(br), br.left, br.top)
        # Dotted green rectangle: current selection.
        if self.selectedRect is not None:
            dc.SetBrush(wx.Brush('WHITE', style=wx.TRANSPARENT))
            dc.SetPen(wx.Pen('GREEN', style=wx.DOT))
            dc.DrawRectangle(self.selectedRect)
        # Grey line: in-progress drag from the mouse-down point.
        if self.downPos is not None and self.dragPos is not None:
            dc.SetPen(wx.LIGHT_GREY_PEN)
            dc.DrawLine(self.downPos, self.dragPos)
        # Dotted red rectangle: object under the mouse-up point, when it
        # differs from the selection (i.e. a potential connection target).
        if self.upObj is not None and self.upObj != self.selectedObj:
            dc.SetBrush(wx.Brush('WHITE', style=wx.TRANSPARENT))
            dc.SetPen(wx.Pen('RED', style=wx.DOT))
            dc.DrawRectangle(self.upRect)
def OnMouse(self, event):
"""
Mouse event handler
wx.Event event
Process mouse events:
Left button down
Left button up
Right button down
Right button up
Drag
Left button double-click
Right button double-click
"""
scale = self.parent.zoom
rawpos = self.CalcUnscrolledPosition(event.GetX(),event.GetY()) # Where on the screen the mouse is
mpos = (rawpos[0] / scale, rawpos[1] / scale)
self.dragPos = None
if event.LeftDown(): # normal mouse click - will select something
# Try and find an object under the mouse
self.downPos = mpos # Record where the button was clicked
self.selectedObj, self.selectedRect = self.ObjAtPos(mpos)
self.Refresh()
elif event.Dragging():
self.dragPos = mpos # Record current mouse drag position
# Process automatic scrolling
ppu = self.GetScrollPixelsPerUnit()
sx = event.GetX() / ppu[0]
sy = event.GetY() / ppu[1]
vs = self.GetViewStart()
sz = self.GetClientSize()
sz[0] = sx - sz[0] / ppu[0] + vs[0]
sz[1] = sy - sz[1] / ppu[1] + vs[1]
sx = vs[0] + sx if sx < 0 else sz[0] if sz[0] > vs[0] else vs[0]
sy = vs[1] + sy if sy < 0 else sz[1] if sz[1] > vs[1] else vs[1]
self.Scroll((sx,sy))
self.Refresh()
elif event.LeftUp(): # Action on leftUp depends upon what's selected
self.dragPos = None
self.upObj, self.upRect = self.ObjAtPos(mpos)
self.LeftMouse(event.ControlDown(), event.ShiftDown(), event.AltDown())
self.Refresh()
elif event.LeftDClick(): # Always preceded by a LeftUp()
pass
elif event.RightDown():
# Try and find an object under the mouse
self.downPos = mpos # Record where the button was clicked
self.selectedObj, self.selectedRect = self.ObjAtPos(mpos)
self.Refresh()
elif event.RightUp():
self.dragPos = None
self.upObj = None
self.upRect = None
self.contextMenu(event.ControlDown(), event.ShiftDown(), event.AltDown())
self.Refresh()
elif event.RightDClick(): # Always precede by a RightUp()
pass
else:
delta = - round(event.GetWheelRotation() / 60.0)
if delta:
if event.ShiftDown() or event.GetWheelAxis == wx.MOUSE_WHEEL_HORIZONTAL:
self.Scroll(self.GetViewStart() + wx.Point(delta, 0))
else:
self.Scroll(self.GetViewStart() + wx.Point(0, delta))
if event.ControlDown():
if delta > 0:
self.parent.zoom *= 1.4142135
else:
self.parent.zoom /= 1.4142135
self.Refresh()
def OnKeyDown(self, event):
"""
The KEY_DOWN event handler. Processes key down events for this window
wx.Event event
"""
ix = (event.GetModifiers(), event.GetKeyCode())
if ix in self.key_table:
self.key_table[ix]()
else:
event.Skip()
# Overridden methods
# New methods
def getData(self, tag, elem):
"""
return data from the xml object elem,
which should be have the given tag.
Return an list of values, each of which could be a dictionary or a list.
Notice that no structure type we use currently contain arrays or
structures; if this changes then we have to revisit this method.
For the moment, we ignore ptype except for chars and assume we have correct data.
The function is used for both scalars and arrays. We hope that
there is only one child for scalars (as there should be)...
"""
values = []
for child in elem:
d = bG.getData(child, tag)
if tag == 'user':
values.append(d)
elif isinstance(d, list):
values += d
elif d is not None:
values.append(d)
return values
    def insertArrayDataProperty(self, props, pname, pxtype, pref, isArray=True, needData=True):
        """
        Insert a scalar data property or multiple array data properties
        with label pname, type ptype and data from object referenced by pref.
        Notice that ptype may be either from the vx_type_e enumeration, or
        a type name. The first thing we do is to convert it to a type name.
        For vx_enum data a 'subtype' selector property is inserted first and
        the element type becomes the selected subtype; for arrays a 'size'
        selector (capped at the object's capacity) is inserted before the
        per-element properties.
        """
        ptype = TypeDef.typeFromEnum(pxtype, pxtype)
        elem = Xref.get(pref).elem
        values = self.getData(TypeDef.tagFromEnum(elem.get(bG.selemType)), elem)
        xobj = Xref.get(pref)
        if ptype == 'vx_enum':
            # Need to select a sub-type for this object
            t = xobj.subtype # get last recorded sub-type for this object
            if t is None:
                # default the type to a number if we haven't had this before
                t = ddefs.s_vx_uint32
            self.insertEnumProperty(props, pname + ' subtype', ptype, t)
            # Subsequent element properties use the selected subtype.
            ptype = t
        if isArray:
            # We need to select the array size based upon current size
            # with a maximum of the given capacity
            cap = int(xobj.elem.get(bG.scapacity, "1"))
            cursize = xobj.datasize # min(len(values), xobj.datasize)
            self.insertUintProperty(props, pname + ' size', cursize, max=cap)
        else:
            cursize = 1
        # Pad missing values with type defaults so indexing below is safe.
        # NOTE(review): when needData is False and len(values) < cursize,
        # values[i] below would raise IndexError — presumably callers
        # guarantee the data is present in that case; confirm.
        if needData and len(values) < cursize:
            values += [TypeDef.defaultData(ptype)] * (cursize - len(values))
        for i in range(cursize):
            if isArray:
                elname = "%s[%d]"%(pname, i)
            else:
                elname = pname
            self.insertScalarDataProperty(props, elname, ptype, values[i], pref)
def insertScalarDataProperty(self, props, elname, ptype, value, pref):
"""
Insert a scalar data property with label pname, type ptype and data given in value
Notice that ptype is a type name.
pref is the reference of the object, required for vx_enum
"""
if isinstance(value, dict):
# it's a structure. Iterate through the definition
# getData will have provided the values in a dictionary
for fname, ftype in TypeDef.items(ptype):
print((fname, ftype))
self.insertScalarDataProperty(props, "%s.%s"%(elname, fname), ftype, value[fname], pref)
elif isinstance(value, list):
# it must be a user struct. Iterate through byte values
for i in range(len(value)):
self.insertUintProperty(props, "%s[%d]"%(elname, i), value[i], max=255)
elif ptype in {'vx_int8', 'vx_int16', 'vx_int32', 'vx_int64'}:
self.insertIntProperty(props, elname, value)
elif ptype in {'vx_uint8', 'vx_uint16', 'vx_uint32', 'vx_uint64', 'vx_size'}:
self.insertUintProperty(props, elname, value)
elif ptype in {'vx_float16', 'vx_float32', 'vx_float64'}:
self.insertFloatProperty(props, elname, value)
elif ptype == 'vx_char':
self.insertStringProperty(props, elname, value)
elif ptype == 'vx_bool':
props.Append(wx.propgrid.BoolProperty(label=elname, name=elname, value=value == 'true'))
elif ptype == 'vx_df_image':
self.insertEnumProperty(props, elname, 'vx_df_image_e', TypeDef.formatToId(value))
elif ptype == 'vx_enum':
# Need to select a sub-type for this object - it may be a field in a structure
# (Notice that will affect all vx_enum fields in the structure) TODO
t = Xref.get(pref).subtype # get last recorded sub-type for this object
if t is None:
# default the type to a number if we haven't had this before
t = ddefs.s_vx_uint32
self.insertEnumProperty(props, elname + ' subtype', ptype, t)
if t == ddefs.s_vx_uint32:
self.insertUintProperty(props, elname, value, max=0xFFFFFFFFFFFFFFFF)
| |
import copy
import logging as log
import os
from typing import Dict
import torch
import torch.nn as nn
from allennlp.modules import scalar_mix
import pytorch_transformers
from jiant.preprocess import parse_task_list_arg
from jiant.utils import utils
class PytorchTransformersEmbedderModule(nn.Module):
    """ Shared code for pytorch_transformers wrappers.
    Subclasses share a good deal of code, but have a number of subtle differences due to different
    APIs from pytorch_transformers.
    """

    def __init__(self, args):
        super(PytorchTransformersEmbedderModule, self).__init__()
        # Pretrained-weight cache directory; the env var overrides the
        # per-experiment default.
        self.cache_dir = os.getenv(
            "PYTORCH_PRETRAINED_BERT_CACHE",
            os.path.join(args.exp_dir, "pytorch_transformers_cache"),
        )
        utils.maybe_make_dir(self.cache_dir)
        self.embeddings_mode = args.pytorch_transformers_output_mode
        # Integer token indices for special symbols; filled in by subclasses.
        self._sep_id = None
        self._cls_id = None
        self._pad_id = None
        # If set, treat these special tokens as part of input segments other than A/B.
        self._SEG_ID_CLS = None
        self._SEG_ID_SEP = None

    def parameter_setup(self, args):
        """Configure trainability, the highest usable layer, and (optionally)
        ELMo-style scalar mixing. Must be called after self.model is set."""
        # Set trainability of this module.
        for param in self.model.parameters():
            param.requires_grad = bool(args.transfer_paradigm == "finetune")
        self.num_layers = self.model.config.num_hidden_layers
        if args.pytorch_transformers_max_layer >= 0:
            self.max_layer = args.pytorch_transformers_max_layer
            assert self.max_layer <= self.num_layers
        else:
            self.max_layer = self.num_layers
        # Configure scalar mixing, ELMo-style.
        if self.embeddings_mode == "mix":
            if args.transfer_paradigm == "frozen":
                log.warning(
                    "NOTE: pytorch_transformers_output_mode='mix', so scalar "
                    "mixing weights will be fine-tuned even if BERT "
                    "model is frozen."
                )
            # TODO: if doing multiple target tasks, allow for multiple sets of
            # scalars. See the ELMo implementation here:
            # https://github.com/allenai/allennlp/blob/master/allennlp/modules/elmo.py#L115
            assert len(parse_task_list_arg(args.target_tasks)) <= 1, (
                "pytorch_transformers_output_mode='mix' only supports a single set of "
                "scalars (but if you need this feature, see the TODO in "
                "the code!)"
            )
            # Always have one more mixing weight, for lexical layer.
            self.scalar_mix = scalar_mix.ScalarMix(self.max_layer + 1, do_layer_norm=False)

    def prepare_output(self, lex_seq, hidden_states, mask):
        """
        Convert the output of the pytorch_transformers module to a vector sequence as
        expected by jiant, according to self.embeddings_mode.
        args:
            lex_seq: The sequence of input word embeddings as a tensor (batch_size, sequence_length, hidden_size).
                Used only if embeddings_mode = "only".
            hidden_states: A list of sequences of model hidden states as tensors (batch_size, sequence_length, hidden_size).
            mask: A tensor with 1s in positions corresponding to non-padding tokens (batch_size, sequence_length).
        returns:
            <float32> [batch_size, var_seq_len, output_dim]
        raises:
            NotImplementedError for an unrecognized embeddings_mode.
        """
        # Only layers up to (and including) max_layer participate.
        available_layers = hidden_states[: self.max_layer + 1]
        if self.embeddings_mode in ["none", "top"]:
            h = available_layers[-1]
        elif self.embeddings_mode == "only":
            h = lex_seq
        elif self.embeddings_mode == "cat":
            h = torch.cat([available_layers[-1], lex_seq], dim=2)
        elif self.embeddings_mode == "mix":
            h = self.scalar_mix(available_layers, mask=mask)
        else:
            raise NotImplementedError(f"embeddings_mode={self.embeddings_mode}" " not supported.")
        # <float32> [batch_size, var_seq_len, output_dim]
        return h

    def get_output_dim(self):
        """Dimensionality of the vectors produced by prepare_output()."""
        if self.embeddings_mode == "cat":
            return 2 * self.model.config.hidden_size
        else:
            return self.model.config.hidden_size

    def get_seg_ids(self, token_ids):
        """ Dynamically build the segment IDs for a concatenated pair of sentences
        Searches for index _sep_id in the tensor. Supports BERT or XLNet-style padding.
        Sets padding tokens to segment zero.
        args:
            token_ids (torch.LongTensor): batch of token IDs
        returns:
            seg_ids (torch.LongTensor): batch of segment IDs
        example:
            > sents = ["[CLS]", "I", "am", "a", "cat", ".", "[SEP]", "You", "like", "cats", "?", "[SEP]", "[PAD]"]
            > token_tensor = torch.Tensor([[vocab[w] for w in sent]]) # a tensor of token indices
            > seg_ids = get_seg_ids(token_tensor)
            > assert seg_ids == torch.LongTensor([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0])
        """
        # BUGFIX: removed a dead batch-level `sep_idxs = (...).nonzero()[:, 1]`
        # assignment that was immediately shadowed by the per-row computation
        # inside the loop below (wasted work, misleading to readers).
        seg_ids = torch.zeros_like(token_ids)
        for row_idx, row in enumerate(token_ids):
            sep_idxs = (row == self._sep_id).nonzero()
            seg = 0
            prev_sep_idx = -1
            for sep_idx in sep_idxs:
                # Each SEP closes the current segment (inclusive of the SEP).
                seg_ids[row_idx, prev_sep_idx + 1 : sep_idx + 1].fill_(seg)
                seg = 1 - seg  # Alternate.
                prev_sep_idx = sep_idx
            # Positions after the final SEP (padding) keep segment 0.
        if self._SEG_ID_CLS is not None:
            seg_ids[token_ids == self._cls_id] = self._SEG_ID_CLS
        if self._SEG_ID_SEP is not None:
            seg_ids[token_ids == self._sep_id] = self._SEG_ID_SEP
        return seg_ids
class BertEmbedderModule(PytorchTransformersEmbedderModule):
    """ Wrapper for BERT module to fit into jiant APIs. """
    def __init__(self, args):
        super(BertEmbedderModule, self).__init__(args)
        self.model = pytorch_transformers.BertModel.from_pretrained(
            args.input_module, cache_dir=self.cache_dir, output_hidden_states=True
        )
        tokenizer = pytorch_transformers.BertTokenizer.from_pretrained(
            args.input_module, cache_dir=self.cache_dir, do_lower_case="uncased" in args.tokenizer
        )  # TODO: Speed things up slightly by reusing the previously-loaded tokenizer.
        # Record the integer ids of BERT's special tokens for get_seg_ids()
        # and the padding correction in forward().
        self._sep_id = tokenizer.convert_tokens_to_ids("[SEP]")
        self._cls_id = tokenizer.convert_tokens_to_ids("[CLS]")
        self._pad_id = tokenizer.convert_tokens_to_ids("[PAD]")
        self.parameter_setup(args)
    @staticmethod
    def apply_boundary_tokens(s1, s2=None):
        # BERT-style boundary token padding on string token sequences:
        # [CLS] s1 [SEP] for one sentence, [CLS] s1 [SEP] s2 [SEP] for a pair.
        if s2:
            return ["[CLS]"] + s1 + ["[SEP]"] + s2 + ["[SEP]"]
        else:
            return ["[CLS]"] + s1 + ["[SEP]"]
    def forward(
        self, sent: Dict[str, torch.LongTensor], unused_task_name: str = ""
    ) -> torch.FloatTensor:
        """ Run BERT to get hidden states.
        This forward method does preprocessing on the go,
        changing token IDs from preprocessed bert to
        what AllenNLP indexes.
        Args:
            sent: batch dictionary
        Returns:
            h: [batch_size, seq_len, d_emb]
        """
        assert "pytorch_transformers_wpm_pretokenized" in sent
        # <int32> [batch_size, var_seq_len]
        # NOTE(review): unlike XLNetEmbedderModule.forward, no deepcopy is
        # taken here, so the index shifts below mutate the batch tensor in
        # place — confirm downstream consumers of `sent` are unaffected.
        ids = sent["pytorch_transformers_wpm_pretokenized"]
        # BERT supports up to 512 tokens; see section 3.2 of https://arxiv.org/pdf/1810.04805.pdf
        assert ids.size()[1] <= 512
        # Mask computed before the shifts below, while 0 still means padding.
        mask = ids != 0
        # "Correct" ids to account for different indexing between BERT and
        # AllenNLP.
        # The AllenNLP indexer adds a '@@UNKNOWN@@' token to the
        # beginning of the vocabulary, *and* treats that as index 1 (index 0 is
        # reserved for padding).
        ids[ids == 0] = self._pad_id + 2  # Shift the indices that were at 0 to become 2.
        # Index 1 should never be used since the BERT WPM uses its own
        # unk token, and handles this at the string level before indexing.
        assert (ids > 1).all()
        ids -= 2  # shift indices to match BERT wordpiece embeddings
        if self.embeddings_mode not in ["none", "top"]:
            # This is redundant with the lookup inside BertModel,
            # but doing so this way avoids the need to modify the BertModel
            # code.
            # Extract lexical embeddings
            lex_seq = self.model.embeddings.word_embeddings(ids)
            lex_seq = self.model.embeddings.LayerNorm(lex_seq)
            hidden_states = []  # dummy; should not be accessed.
            # following our use of the OpenAI model, don't use dropout for
            # probing. If you would like to use dropout, consider applying
            # later on in the SentenceEncoder (see models.py).
            # h_lex = self.model.embeddings.dropout(embeddings)
        else:
            lex_seq = None  # dummy; should not be accessed.
        if self.embeddings_mode != "only":
            # encoded_layers is a list of layer activations, each of which is
            # <float32> [batch_size, seq_len, output_dim]
            token_types = self.get_seg_ids(ids)
            _, output_pooled_vec, hidden_states = self.model(
                ids, token_type_ids=token_types, attention_mask=mask
            )
        # <float32> [batch_size, var_seq_len, output_dim]
        return self.prepare_output(lex_seq, hidden_states, mask)
class XLNetEmbedderModule(PytorchTransformersEmbedderModule):
""" Wrapper for XLNet module to fit into jiant APIs. """
def __init__(self, args):
super(XLNetEmbedderModule, self).__init__(args)
self.model = pytorch_transformers.XLNetModel.from_pretrained(
args.input_module, cache_dir=self.cache_dir, output_hidden_states=True
)
tokenizer = pytorch_transformers.XLNetTokenizer.from_pretrained(
args.input_module, cache_dir=self.cache_dir, do_lower_case="uncased" in args.tokenizer
) # TODO: Speed things up slightly by reusing the previously-loaded tokenizer.
self._sep_id = tokenizer.convert_tokens_to_ids("<sep>")
self._cls_id = tokenizer.convert_tokens_to_ids("<cls>")
self._pad_id = tokenizer.convert_tokens_to_ids("<pad>")
self._unk_id = tokenizer.convert_tokens_to_ids("<unk>")
self.parameter_setup(args)
# Segment IDs for CLS and SEP tokens. Unlike in BERT, these aren't part of the usual 0/1 input segments.
# Standard constants reused from pytorch_transformers. They aren't actually used within the pytorch_transformers code, so we're reproducing them here in case they're removed in a later cleanup.
self._SEG_ID_CLS = 2
self._SEG_ID_SEP = 3
@staticmethod
def apply_boundary_tokens(s1, s2=None):
# XLNet-style boundary token marking on string token sequences
if s2:
return s1 + ["<sep>"] + s2 + ["<sep>", "<cls>"]
else:
return s1 + ["<sep>", "<cls>"]
def forward(
self, sent: Dict[str, torch.LongTensor], unused_task_name: str = ""
) -> torch.FloatTensor:
""" Run XLNet to get hidden states.
This forward method does preprocessing on the go,
changing token IDs from preprocessed word pieces to
what AllenNLP indexes.
Args:
sent: batch dictionary
Returns:
h: [batch_size, seq_len, d_emb]
"""
assert "pytorch_transformers_wpm_pretokenized" in sent
# <int32> [batch_size, var_seq_len]
# Make a copy so our padding modifications below don't impact masking decisions elsewhere.
ids = copy.deepcopy(sent["pytorch_transformers_wpm_pretokenized"])
mask = ids != 0
# "Correct" ids to account for different indexing between XLNet and
# AllenNLP.
# The AllenNLP indexer adds a '@@UNKNOWN@@' token to the
# beginning of the vocabulary, *and* treats that as index 1 (index 0 is
# reserved for native padding).
ids[ids == 0] = self._pad_id + 2 # Rewrite padding indices.
ids[ids == 1] = self._unk_id + 2 # Rewrite UNK indices.
ids -= 2 # shift indices to match XLNet wordpiece embeddings
if self.embeddings_mode not in ["none", "top"]:
# This is redundant with the lookup inside XLNetModel,
# but doing so this way avoids the need to modify the XLNetModel
# code.
lex_seq = self.model.word_embedding(ids)
hidden_states = [] # dummy; should not be accessed.
# following our | |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from collections import namedtuple
import paddle
import paddle.fluid as fluid
from .utils.utils import get_paddle_version, remove_model_fn
pd_ver = get_paddle_version()
if pd_ver == 185:
from .layers_old import SuperConv2D, SuperLinear
Layer = paddle.fluid.dygraph.Layer
DataParallel = paddle.fluid.dygraph.DataParallel
else:
from .layers import SuperConv2D, SuperLinear
Layer = paddle.nn.Layer
DataParallel = paddle.DataParallel
from .layers_base import BaseBlock
from .utils.utils import search_idx
from ...common import get_logger
from ...core import GraphWrapper, dygraph2program
from .get_sub_model import get_prune_params_config, prune_params, check_search_space
import random
_logger = get_logger(__name__, level=logging.INFO)
__all__ = ['OFA', 'RunConfig', 'DistillConfig']
RunConfig = namedtuple(
    'RunConfig',
    [
        # int, batch_size in training, used to get current epoch, default: None
        'train_batch_size',
        # list, the number of epoch of every task in training, default: None
        'n_epochs',
        # list, initial learning rate of every task in training, NOT used now. Default: None.
        'init_learning_rate',
        # int, total images of train dataset, used to get current epoch, default: None
        'total_images',
        # list, elastic depth of the model in training, default: None
        'elastic_depth',
        # list, the number of sub-network to train per mini-batch data, used to get current epoch, default: None
        'dynamic_batch_size'
    ])
# All RunConfig fields are optional and default to None.
RunConfig.__new__.__defaults__ = (None, ) * len(RunConfig._fields)
DistillConfig = namedtuple(
    'DistillConfig',
    [
        # float, lambda scale of distillation loss, default: None.
        'lambda_distill',
        # instance of model, instance of teacher model, default: None.
        'teacher_model',
        # list(str), name of the layers which need a distillation, default: None.
        'mapping_layers',
        # str, the path of teacher pretrained model, default: None.
        'teacher_model_path',
        # instance of loss layer, the loss function used in distillation, if set to None, use mse_loss default, default: None.
        'distill_fn',
        # str, define which op append between teacher model and student model used in distillation, choice in ['conv', 'linear', None], default: None.
        'mapping_op'
    ])
# All DistillConfig fields are optional and default to None.
DistillConfig.__new__.__defaults__ = (None, ) * len(DistillConfig._fields)
class OFABase(Layer):
    """Base wrapper that walks a supernet and indexes its elastic layers.

    On construction it traverses the (possibly DataParallel-wrapped) model
    and records, for every non-fixed BaseBlock sub-layer:
      - _ofa_layers:   sublayer name -> candidate_config
      - _layers:       sublayer key  -> candidate_config
      - _key2name:     sublayer key  -> sublayer name
      - _elastic_task: set of elastic dimension names (e.g. 'kernel_size')
    """

    def __init__(self, model):
        super(OFABase, self).__init__()
        self.model = model
        self._ofa_layers, self._elastic_task, self._key2name, self._layers = self.get_layers(
        )

    def get_layers(self):
        """Traverse the model, register this supernet on each BaseBlock, and
        collect the candidate configurations of the non-fixed blocks."""
        ofa_layers = dict()
        layers = dict()
        key2name = dict()
        elastic_task = set()
        # DataParallel wraps the real model in ._layers.
        model_to_traverse = self.model._layers if isinstance(
            self.model, DataParallel) else self.model
        for name, sublayer in model_to_traverse.named_sublayers():
            if isinstance(sublayer, BaseBlock):
                sublayer.set_supernet(self)
                if not sublayer.fixed:
                    ofa_layers[name] = sublayer.candidate_config
                    layers[sublayer.key] = sublayer.candidate_config
                    key2name[sublayer.key] = name
                    for k in sublayer.candidate_config.keys():
                        elastic_task.add(k)
        return ofa_layers, elastic_task, key2name, layers

    def forward(self, *inputs, **kwargs):
        raise NotImplementedError

    def layers_forward(self, block, *inputs, **kwargs):
        """Run one elastic block with the sub-network config currently
        selected in self.current_config (empty config if none is active)."""
        if getattr(self, 'current_config', None) is not None:
            ### if block is fixed, donnot join key into candidate
            ### concrete config as parameter in kwargs
            if not block.fixed:
                assert self._key2name[
                    block.
                    key] in self.current_config, 'DONNT have {} layer in config.'.format(
                        self._key2name[block.key])
                config = self.current_config[self._key2name[block.key]]
            else:
                config = dict()
                config.update(kwargs)
        else:
            config = dict()
        # BUGFIX: was logging.debug(self.model, config), which passes the model
        # as the %-format string and config as a stray argument — that raises a
        # formatting error whenever DEBUG logging is enabled. Use a proper
        # lazy %-style message instead.
        logging.debug("model: %s, config: %s", self.model, config)
        return block.fn(*inputs, **config)

    @property
    def ofa_layers(self):
        return self._ofa_layers

    @property
    def layers(self):
        return self._layers
class OFA(OFABase):
"""
Convert the training progress to the Once-For-All training progress, a detailed description in the paper: `Once-for-All: Train One Network and Specialize it for Efficient Deployment<https://arxiv.org/abs/1908.09791>`_ . This paper propose a training propgress named progressive shrinking (PS), which means we start with training the largest neural network with the maximum kernel size (i.e., 7), depth (i.e., 4), and width (i.e., 6). Next, we progressively fine-tune the network to support smaller sub-networks by gradually adding them into the sampling space (larger sub-networks may also be sampled). Specifically, after training the largest network, we first support elastic kernel size which can choose from {3, 5, 7} at each layer, while the depth and width remain the maximum values. Then, we support elastic depth and elastic width sequentially.
Parameters:
model(paddle.nn.Layer): instance of model.
run_config(paddleslim.ofa.RunConfig, optional): config in ofa training, can reference `<>`_ . Default: None.
distill_config(paddleslim.ofa.DistillConfig, optional): config of distilltion in ofa training, can reference `<>`_. Default: None.
elastic_order(list, optional): define the training order, if it set to None, use the default order in the paper. Default: None.
train_full(bool, optional): whether to train the largest sub-network only. Default: False.
Examples:
.. code-block:: python
from paddle.vision.models import mobilenet_v1
from paddleslim.nas.ofa import OFA
from paddleslim.nas.ofa.convert_super import Convert, supernet
model = mobilenet_v1()
sp_net_config = supernet(kernel_size=(3, 5, 7), expand_ratio=[1, 2, 4])
sp_model = Convert(sp_net_config).convert(model)
ofa_model = OFA(sp_model)
"""
    def __init__(self,
                 model,
                 run_config=None,
                 distill_config=None,
                 elastic_order=None,
                 train_full=False,
                 candidate_depth_list=None
                 ):
        """Initialise OFA training state.

        model                -- the supernet to train (paddle.nn.Layer)
        run_config           -- RunConfig namedtuple, or None
        distill_config       -- DistillConfig namedtuple, or None; when given,
                                a teacher model is prepared for distillation
        elastic_order        -- explicit list of elastic tasks to train in
                                order; when None a default order is derived
        train_full           -- train only the largest sub-network
        candidate_depth_list -- stored on the instance; not otherwise used in
                                this constructor
        """
        super(OFA, self).__init__(model)
        self.net_config = None
        self.run_config = run_config
        self.distill_config = distill_config
        self.elastic_order = elastic_order
        self.train_full = train_full
        # Global iteration counter; used by _compute_epochs().
        self.iter = 0
        self.dynamic_iter = 0
        self.manual_set_task = False
        self.task_idx = 0
        self._add_teacher = False
        self.netAs_param = []
        self._mapping_layers = None
        self._build_ss = False
        self._broadcast = False
        self.candidate_depth_list = candidate_depth_list
        self.depth_list_manual = None
        ### if elastic_order is none, use default order
        if self.elastic_order is not None:
            assert isinstance(self.elastic_order,
                              list), 'elastic_order must be a list'
            # NOTE(review): this elastic_depth registration is duplicated in
            # the default-order branch below; both paths register the sorted,
            # de-duplicated depth list under 'depth' — confirm intentional.
            if getattr(self.run_config, 'elastic_depth', None) != None:
                depth_list = list(set(self.run_config.elastic_depth))
                depth_list.sort()
                self._ofa_layers['depth'] = depth_list
        if self.elastic_order is None:
            self.elastic_order = []
            # zero, elastic resolution, write in demo
            # first, elastic kernel size
            if 'kernel_size' in self._elastic_task:
                self.elastic_order.append('kernel_size')
            # second, elastic depth, such as: list(2, 3, 4)
            if getattr(self.run_config, 'elastic_depth', None) != None:
                depth_list = list(set(self.run_config.elastic_depth))
                depth_list.sort()
                self._ofa_layers['depth'] = depth_list
                self.elastic_order.append('depth')
            # final, elastic width
            if 'expand_ratio' in self._elastic_task:
                self.elastic_order.append('width')
            if 'channel' in self._elastic_task and 'width' not in self.elastic_order:
                self.elastic_order.append('width')
        # Validate that per-task schedule lists line up with the elastic order.
        if getattr(self.run_config, 'n_epochs', None) != None:
            assert len(self.run_config.n_epochs) == len(self.elastic_order)
            for idx in range(len(run_config.n_epochs)):
                assert isinstance(
                    run_config.n_epochs[idx],
                    list), "each candidate in n_epochs must be list"
            if self.run_config.dynamic_batch_size != None:
                assert len(self.run_config.n_epochs) == len(
                    self.run_config.dynamic_batch_size)
            if self.run_config.init_learning_rate != None:
                assert len(self.run_config.n_epochs) == len(
                    self.run_config.init_learning_rate)
                for idx in range(len(run_config.n_epochs)):
                    assert isinstance(
                        run_config.init_learning_rate[idx], list
                    ), "each candidate in init_learning_rate must be list"
        ### ================= add distill prepare ======================
        if self.distill_config != None:
            self._add_teacher = True
            self._prepare_distill()
        #self.model.train()
    def _prepare_distill(self):
        """Set up the teacher model and (optionally) per-layer adaptor layers
        ('netAs') used to compute the distillation loss on mapping layers."""
        # NOTE(review): logging.error does not abort; if teacher_model is None
        # the assert below fails anyway — the log line just adds context.
        if self.distill_config.teacher_model == None:
            logging.error(
                'If you want to add distill, please input instance of teacher model'
            )
        ### instance model by user can input super-param easily.
        assert isinstance(self.distill_config.teacher_model, Layer)
        # load teacher parameter
        if self.distill_config.teacher_model_path != None:
            param_state_dict, _ = paddle.load_dygraph(
                self.distill_config.teacher_model_path)
            self.distill_config.teacher_model.set_dict(param_state_dict)
        self.ofa_teacher_model = OFABase(self.distill_config.teacher_model)
        self.ofa_teacher_model.model.eval()
        # add hook if mapping layers is not None
        # if mapping layer is None, return the output of the teacher model,
        # if mapping layer is NOT None, add hook and compute distill loss about mapping layers.
        mapping_layers = getattr(self.distill_config, 'mapping_layers', None)
        if mapping_layers != None:
            # Under DataParallel sublayer names gain a '_layers.' prefix, so
            # the requested mapping-layer names must be rewritten to match.
            if isinstance(self.model, DataParallel):
                for idx, name in enumerate(mapping_layers):
                    if name[:7] != '_layers':
                        mapping_layers[idx] = '_layers.' + name
            self._mapping_layers = mapping_layers
            self.netAs = []
            for name, sublayer in self.model.named_sublayers():
                if name in self._mapping_layers:
                    # Build the adaptor ('netA') between student and teacher
                    # activations, per the configured mapping_op.
                    if self.distill_config.mapping_op != None:
                        if self.distill_config.mapping_op.lower() == 'conv2d':
                            netA = SuperConv2D(
                                getattr(sublayer, '_num_filters',
                                        sublayer._out_channels),
                                getattr(sublayer, '_num_filters',
                                        sublayer._out_channels), 1)
                        elif self.distill_config.mapping_op.lower() == 'linear':
                            netA = SuperLinear(
                                getattr(sublayer, '_output_dim',
                                        sublayer._out_features),
                                getattr(sublayer, '_output_dim',
                                        sublayer._out_features))
                        else:
                            raise NotImplementedError(
                                "Not Support Op: {}".format(
                                    self.distill_config.mapping_op.lower()))
                    else:
                        netA = None
                    # netA may be None (identity mapping); keep list aligned
                    # with mapping layers either way.
                    if netA != None:
                        self.netAs_param.extend(netA.parameters())
                    self.netAs.append(netA)
def _reset_hook_before_forward(self):
self.Tacts, self.Sacts = {}, {}
if self._mapping_layers != None:
def get_activation(mem, name):
def get_output_hook(layer, input, output):
mem[name] = output
return get_output_hook
def add_hook(net, mem, mapping_layers):
for idx, (n, m) in enumerate(net.named_sublayers()):
if n in mapping_layers:
m.register_forward_post_hook(get_activation(mem, n))
add_hook(self.model, self.Sacts, self._mapping_layers)
add_hook(self.ofa_teacher_model.model, self.Tacts,
self._mapping_layers)
def _compute_epochs(self):
if getattr(self, 'epoch', None) == None:
assert self.run_config.total_images is not None, \
"if not use set_epoch() to set epoch, please set total_images in run_config."
assert self.run_config.train_batch_size is not None, \
"if not use set_epoch() to set epoch, please set train_batch_size in run_config."
assert self.run_config.n_epochs is not None, \
"if not use set_epoch() to set epoch, please set n_epochs in run_config."
self.iter_per_epochs = self.run_config.total_images // self.run_config.train_batch_size
epoch = self.iter // self.iter_per_epochs
else:
epoch = self.epoch
return epoch
def _sample_from_nestdict(self, cands, sample_type, task, phase):
sample_cands = dict()
for k, v in cands.items():
if isinstance(v, dict):
sample_cands[k] = self._sample_from_nestdict(
v, sample_type=sample_type, | |
"""
This module tracks and validates the status of Genomics Pipeline Subprocesses.
"""
import logging
from datetime import datetime
import pytz
from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
from rdr_service import clock, config
from rdr_service.api_util import list_blobs
from rdr_service.cloud_utils.gcp_cloud_tasks import GCPCloudTask
from rdr_service.config import (
GAE_PROJECT,
GENOMIC_GC_METRICS_BUCKET_NAME,
getSetting,
getSettingList,
GENOME_TYPE_ARRAY,
MissingConfigException,
RDR_SLACK_WEBHOOKS
)
from rdr_service.dao.bq_genomics_dao import bq_genomic_job_run_update, bq_genomic_file_processed_update, \
bq_genomic_manifest_file_update, bq_genomic_manifest_feedback_update, \
bq_genomic_gc_validation_metrics_batch_update, bq_genomic_set_member_batch_update, \
bq_genomic_gc_validation_metrics_update
from rdr_service.genomic.genomic_data_quality_components import ReportingComponent
from rdr_service.genomic.genomic_mappings import raw_aw1_to_genomic_set_member_fields, \
raw_aw2_to_genomic_set_member_fields, genomic_data_file_mappings, genome_centers_id_from_bucket_array, \
wgs_file_types_attributes, array_file_types_attributes
from rdr_service.genomic.genomic_set_file_handler import DataError
from rdr_service.genomic.genomic_state_handler import GenomicStateHandler
from rdr_service.model.genomics import GenomicManifestFile, GenomicManifestFeedback, GenomicIncident, \
GenomicGCValidationMetrics, GenomicInformingLoop, GenomicGcDataFile
from rdr_service.genomic_enums import GenomicJob, GenomicWorkflowState, GenomicSubProcessStatus, \
GenomicSubProcessResult, GenomicIncidentCode, GenomicManifestTypes
from rdr_service.genomic.genomic_job_components import (
GenomicFileIngester,
GenomicReconciler,
GenomicBiobankSamplesCoupler,
ManifestCompiler,
)
from rdr_service.dao.genomics_dao import (
GenomicFileProcessedDao,
GenomicJobRunDao,
GenomicManifestFileDao,
GenomicManifestFeedbackDao,
GenomicIncidentDao,
GenomicSetMemberDao,
GenomicAW1RawDao,
GenomicAW2RawDao,
GenomicGCValidationMetricsDao,
GenomicInformingLoopDao,
GenomicGcDataFileDao,
GenomicGcDataFileMissingDao,
GcDataFileStagingDao)
from rdr_service.resource.generators.genomics import genomic_job_run_update, genomic_file_processed_update, \
genomic_manifest_file_update, genomic_manifest_feedback_update, genomic_gc_validation_metrics_batch_update, \
genomic_set_member_batch_update
from rdr_service.services.email_service import Email, EmailService
from rdr_service.services.slack_utils import SlackMessageHandler
class GenomicJobController:
"""This class controls the tracking of Genomics subprocesses"""
    def __init__(
        self,
        job_id,
        bucket_name=GENOMIC_GC_METRICS_BUCKET_NAME,
        sub_folder_name=None,
        sub_folder_tuple=None,
        archive_folder_name=None,
        bucket_name_list=None,
        storage_provider=None,
        bq_project_id=None,
        task_data=None,
        server_config=None,
        max_num=None
    ):
        """
        Set up the controller state, DAO components, and Slack handler for a
        single genomics subprocess run.

        :param job_id: GenomicJob enum value identifying the workflow
        :param bucket_name: config key resolved via getSetting (defaults to
            the GC metrics bucket key)
        :param sub_folder_name: config key for the sub-folder; falls back to
            the literal value itself when no setting exists
        :param sub_folder_tuple: iterable of sub-folder config keys
        :param archive_folder_name: folder name used for archiving
        :param bucket_name_list: config key resolved via getSettingList
        :param storage_provider: storage provider used by file operations
        :param bq_project_id: BigQuery project id for PDR updates
        :param task_data: payload from the triggering cloud task
        :param server_config: server configuration dict
        :param max_num: cap used by manifest/record batching
        """
        self.job_id = job_id
        self.job_run = None
        self.bucket_name = getSetting(bucket_name, default="")
        # NOTE: default=sub_folder_name lets a literal folder name pass
        # through unchanged when it is not a config key.
        self.sub_folder_name = getSetting(sub_folder_name, default=sub_folder_name)
        self.sub_folder_tuple = sub_folder_tuple
        self.bucket_name_list = getSettingList(bucket_name_list, default=[])
        self.archive_folder_name = archive_folder_name
        self.bq_project_id = bq_project_id
        self.task_data = task_data
        self.bypass_record_count = False
        self.skip_updates = False
        self.server_config = server_config
        self.subprocess_results = set()
        self.job_result = GenomicSubProcessResult.UNSET
        # Sentinel "last run" used until _get_last_successful_run_time()
        # replaces it in __enter__.
        self.last_run_time = datetime(2019, 11, 5, 0, 0, 0)
        self.max_num = max_num
        self.member_ids_for_update = []
        self.manifests_generated = []

        # Components
        self.job_run_dao = GenomicJobRunDao()
        self.file_processed_dao = GenomicFileProcessedDao()
        self.manifest_file_dao = GenomicManifestFileDao()
        self.manifest_feedback_dao = GenomicManifestFeedbackDao()
        self.incident_dao = GenomicIncidentDao()
        self.metrics_dao = GenomicGCValidationMetricsDao()
        self.member_dao = GenomicSetMemberDao()
        self.informing_loop_dao = GenomicInformingLoopDao()
        self.missing_files_dao = GenomicGcDataFileMissingDao()
        # Lazily-created workflow components (set by the methods that use them).
        self.ingester = None
        self.file_mover = None
        self.reconciler = None
        self.biobank_coupler = None
        self.manifest_compiler = None
        self.staging_dao = None
        self.storage_provider = storage_provider
        self.genomic_alert_slack = SlackMessageHandler(
            webhook_url=config.getSettingJson(RDR_SLACK_WEBHOOKS).get('rdr_genomic_alerts')
        )
    def __enter__(self):
        """
        Start the workflow: create the job-run record and capture the last
        successful run time, then return the controller for `with` use.
        """
        logging.info(f'Beginning {self.job_id.name} workflow')
        self.job_run = self._create_run(self.job_id)
        self.last_run_time = self._get_last_successful_run_time()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Finalize the job-run record; runs even when the body raised."""
        self._end_run()
def insert_genomic_manifest_file_record(self):
"""
Inserts genomic_manifest_file record from _file_data dict
:return: GenomicManifestFile object
"""
# Set attributes for GenomicManifestFile
now = datetime.utcnow()
try:
_uploadDate = self.task_data.file_data.upload_date
_manifest_type = self.task_data.file_data.manifest_type
_file_path = self.task_data.file_data.file_path
except AttributeError:
raise AttributeError("upload_date, manifest_type, and file_path required")
manifest_file = self.manifest_file_dao.get_manifest_file_from_filepath(_file_path)
if manifest_file is None:
path_list = _file_path.split('/')
file_to_insert = GenomicManifestFile(
created=now,
modified=now,
uploadDate=_uploadDate,
manifestTypeId=_manifest_type,
filePath=_file_path,
bucketName=path_list[0],
recordCount=0, # Initializing with 0, counting records when processing file
rdrProcessingComplete=0,
fileName=path_list[-1]
)
manifest_file = self.manifest_file_dao.insert(file_to_insert)
bq_genomic_manifest_file_update(manifest_file.id, self.bq_project_id)
genomic_manifest_file_update(manifest_file.id)
return manifest_file
def insert_genomic_manifest_feedback_record(self, manifest_file):
"""
Inserts run record from _file_data dict
:param manifest_file: JSONObject
:return: GenomicManifestFeedbackobject
"""
# Set attributes for GenomicManifestFile
now = datetime.utcnow()
feedback_file = self.manifest_feedback_dao.get_feedback_record_from_manifest_id(manifest_file.id)
if feedback_file is None:
feedback_to_insert = GenomicManifestFeedback(
created=now,
modified=now,
inputManifestFileId=manifest_file.id,
feedbackRecordCount=0,
feedbackComplete=0,
ignore=0,
)
feedback_file = self.manifest_feedback_dao.insert(feedback_to_insert)
bq_genomic_manifest_feedback_update(feedback_file.id, self.bq_project_id)
genomic_manifest_feedback_update(feedback_file.id)
return feedback_file
    def get_feedback_records_to_send(self, _num=60):
        """
        Retrieves genomic_manifest_feedback records older than the cutoff.

        :param _num: age cutoff in days (default 60)
        :return: list of GenomicManifestFeedback records
        """
        return self.manifest_feedback_dao.get_feedback_records_past_date_cutoff(num_days=_num)
def get_aw2f_remainder_records(self):
"""
Retrieves genomic_manifest_feedback records that have already been sent
but have remaining data to send
:return: list of GenomicManifestFeedback records
"""
ids = self.manifest_feedback_dao.get_contamination_remainder_feedback_ids()
return self.manifest_feedback_dao.get_feedback_records_from_ids(ids)
def ingest_awn_data_for_member(self, file_path, member):
"""
Executed from genomic tools. Ingests data for a single GenomicSetMember
Currently supports AW1 and AW2
:param file_path:
:param member:
:return:
"""
print(f"Ingesting member ID {member.id} data for file: {file_path}")
# Get max file-processed ID for filename
file_processed = self.file_processed_dao.get_max_file_processed_for_filepath(file_path)
if file_processed is not None:
# Use ingester to ingest 1 row from file
self.ingester = GenomicFileIngester(job_id=self.job_id,
job_run_id=self.job_run.id,
_controller=self,
target_file=file_path[1:]) # strip leading "/"
self.ingester.file_obj = file_processed
self.job_result = GenomicSubProcessResult.SUCCESS
if self.job_id == GenomicJob.AW1_MANIFEST:
self.job_result = self.ingester.ingest_single_aw1_row_for_member(member)
if self.job_id == GenomicJob.METRICS_INGESTION:
self.job_result = self.ingester.ingest_single_aw2_row_for_member(member)
else:
print(f'No file processed IDs for {file_path}')
def ingest_gc_metrics(self):
"""
Uses ingester to ingest files.
"""
try:
logging.info('Running Validation Metrics Ingestion Workflow.')
for gc_bucket_name in self.bucket_name_list:
for folder in self.sub_folder_tuple:
self.sub_folder_name = config.getSetting(folder)
self.ingester = GenomicFileIngester(job_id=self.job_id,
job_run_id=self.job_run.id,
bucket=gc_bucket_name,
sub_folder=self.sub_folder_name,
_controller=self)
self.subprocess_results.add(
self.ingester.generate_file_queue_and_do_ingestion()
)
self.job_result = self._aggregate_run_results()
except RuntimeError:
self.job_result = GenomicSubProcessResult.ERROR
def ingest_specific_manifest(self, filename):
"""
Uses GenomicFileIngester to ingest specific Manifest file.
"""
try:
self.ingester = GenomicFileIngester(job_id=self.job_id,
job_run_id=self.job_run.id,
bucket=self.bucket_name,
target_file=filename,
_controller=self)
self.job_result = self.ingester.generate_file_queue_and_do_ingestion()
except RuntimeError:
self.job_result = GenomicSubProcessResult.ERROR
def ingest_member_ids_from_awn_raw_table(self, member_ids):
"""
Pulls data from genomic_aw1_raw or genomic_aw2_raw based on the value
of self.job_id.
In the case of GenomicJob.AW1_MANIFEST, this loads AW1 data to genomic_set_member.
In the case of GenomicJob.METRICS_INGESTION, this loads AW2 data to genomic_set_member
and genomic_gc_validation_metrics.
:param member_ids: list of genomic_set_member_ids to ingest
:return: ingestion results as string
"""
if self.job_id not in [GenomicJob.AW1_MANIFEST, GenomicJob.METRICS_INGESTION]:
raise AttributeError(f"{self.job_id.name} is invalid for this workflow")
if self.job_id == GenomicJob.AW1_MANIFEST:
raw_dao = GenomicAW1RawDao()
else:
raw_dao = GenomicAW2RawDao()
# Get member records
members = self.member_dao.get_members_from_member_ids(member_ids)
update_recs = []
completed_members = []
multiples = []
missing = []
metrics = [] # for PDR inserts
for member in members:
if not member.biobankId.startswith("HG"):
# add prefix to biobank_id
try:
pre = self.server_config[config.BIOBANK_ID_PREFIX][0]
except KeyError:
# Set default for unit tests
pre = "A"
bid = f"{pre}{member.biobankId}"
else:
bid = member.biobankId
# Get Raw AW1 Records for biobank IDs and genome_type
try:
raw_rec = raw_dao.get_raw_record_from_bid_genome_type(
biobank_id=bid,
genome_type=member.genomeType
)
except MultipleResultsFound:
multiples.append(member.id)
except NoResultFound:
missing.append(member.id)
else:
update_recs.append((member, raw_rec))
if update_recs:
# Get unique file_paths
paths = self.get_unique_file_paths_for_raw_records([rec[1] for rec in update_recs])
file_proc_map = self.map_file_paths_to_fp_id(paths)
# Process records
with self.member_dao.session() as session:
for record_to_update in update_recs:
# AW1
if self.job_id == GenomicJob.AW1_MANIFEST:
self.set_rdr_aw1_attributes_from_raw(record_to_update, file_proc_map)
self.set_aw1_attributes_from_raw(record_to_update)
# AW2
elif self.job_id == GenomicJob.METRICS_INGESTION:
self.preprocess_aw2_attributes_from_raw(record_to_update, file_proc_map)
metrics_obj = self.set_validation_metrics_from_raw(record_to_update)
metrics_obj = session.merge(metrics_obj)
session.commit()
metrics.append(metrics_obj.id)
session.merge(record_to_update[0])
completed_members.append(record_to_update[0].id)
# BQ Updates
if self.job_id == GenomicJob.METRICS_INGESTION:
# Metrics
bq_genomic_gc_validation_metrics_batch_update(metrics, project_id=self.bq_project_id)
genomic_gc_validation_metrics_batch_update(metrics)
# Members
bq_genomic_set_member_batch_update(metrics, project_id=self.bq_project_id)
genomic_set_member_batch_update(completed_members)
return self.compile_raw_ingestion_results(
completed_members,
missing,
multiples,
metrics
)
    def ingest_data_files_into_gc_metrics(self, file_path, bucket_name):
        """
        Attach an accessioned data file to its GC validation metrics record.

        Derives the sample id from the file name, finds the member's metrics
        record, and sets the mapped path/received-flag attributes on it.
        Creates an UNABLE_TO_FIND_METRIC incident when no metrics record
        exists for the sample.

        :param file_path: object path of the data file
        :param bucket_name: bucket the file lives in
        """
        try:
            logging.info(f'Inserting data file: {file_path}')
            data_file = file_path.split('/')[-1]
            # assumes the sample id is the third "_"-separated component of
            # the file name -- TODO confirm against the GC naming spec
            sample_id = data_file.split('_')[2]
            member = self.member_dao.get_member_from_sample_id(sample_id)
            metrics = None if not member else self.metrics_dao.get_metrics_by_member_id(member.id)
            if metrics:
                # Full multi-part extension, e.g. "hard-filtered.vcf.gz".
                ext = file_path.split('.', 1)[-1]
                attrs = []
                # Find the model attributes mapped to this file extension.
                for key, value in genomic_data_file_mappings.items():
                    if ext in value['file_ext']:
                        attrs = genomic_data_file_mappings[key]['model_attrs']
                        break
                if attrs:
                    for value in attrs:
                        # "...Path" attributes get the full object path;
                        # the companion flags are set to 1 (received).
                        if 'Path' in value:
                            metrics.__setattr__(value, f'{bucket_name}/{file_path}')
                        else:
                            metrics.__setattr__(value, 1)
                metrics_obj = self.metrics_dao.upsert(metrics)
                bq_genomic_gc_validation_metrics_update(metrics_obj.id, project_id=self.bq_project_id)
                # NOTE(review): this second call repeats the same BQ update
                # without project_id; it looks like it was meant to be the
                # resource-generator genomic_gc_validation_metrics_update
                # (which is not imported here) -- confirm intent.
                bq_genomic_gc_validation_metrics_update(metrics_obj.id)
            else:
                message = f'{self.job_id.name}: Cannot find genomics metric record for sample id: {sample_id}'
                logging.warning(message)

                self.create_incident(
                    source_job_run_id=self.job_run.id,
                    code=GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name,
                    message=message,
                    sample_id=sample_id if sample_id else '',
                    data_file_path=file_path
                )
        except RuntimeError:
            logging.warning('Inserting data file failure')
def ingest_informing_loop_records(self, *, loop_type, records):
if records:
logging.info(f'Inserting informing loop for Participant: {records[0].participantId}')
module_type = [obj for obj in records if obj.fieldName == 'module_type' and obj.valueString]
decision_value = [obj for obj in records if obj.fieldName == 'decision_value' and obj.valueString]
loop_obj = GenomicInformingLoop(
participant_id=records[0].participantId,
message_record_id=records[0].messageRecordId,
event_type=loop_type,
event_authored_time=records[0].eventAuthoredTime,
module_type=module_type[0].valueString if module_type else None,
decision_value=decision_value[0].valueString if decision_value else None,
)
self.informing_loop_dao.insert(loop_obj)
def accession_data_files(self, file_path, bucket_name):
data_file_dao = GenomicGcDataFileDao()
if data_file_dao.get_with_file_path(file_path):
logging.info(f'{file_path} already exists.')
return 0
# split file name
file_attrs = self.parse_data_file_path(file_path)
# get GC
gc_id = self.get_gc_site_for_data_file(bucket_name, file_path,
file_attrs['name_components'])
# Insert record
data_file_record = GenomicGcDataFile(
file_path=file_path,
gc_site_id=gc_id,
bucket_name=bucket_name,
file_prefix=file_attrs['file_prefix'],
file_name=file_attrs['file_name'],
file_type=file_attrs['file_type'],
identifier_type=file_attrs['identifier_type'],
identifier_value=file_attrs['identifier_value'],
)
data_file_dao.insert(data_file_record)
def parse_data_file_path(self, file_path):
path_components = file_path.split('/')
name_components = path_components[-1].split("_")
# Set ID type and Value
id_type, id_value = self.set_identifier_fields(file_path, name_components)
# Set file type
if "idat" in file_path.lower():
file_type = name_components[-1]
else:
file_type = ".".join(name_components[-1].split('.')[1:])
attr_dict = {
'path_components': path_components,
'name_components': name_components,
'file_prefix': "/".join(path_components[1:-1]),
'file_name': path_components[-1],
'file_type': file_type,
'identifier_type': id_type,
'identifier_value': id_value
}
return attr_dict
@staticmethod
def set_identifier_fields(file_path: str, name_components: list):
if "genotyping" in file_path.lower():
id_type = "chipwellbarcode"
id_value = "_".join(name_components[0:2]).split('.')[0] # ex: 204027270091_R02C01_Grn.idat
elif "wgs" in file_path.lower():
id_type = "sample_id"
id_value = name_components[2] # ex: UW_A102807943_21046008189_689024_v1.cram
else:
id_type = None
id_value = None
return id_type, id_value
@staticmethod
def get_gc_site_for_data_file(bucket_name: str, file_path: str, name_components: list):
if "genotyping" in file_path.lower():
# get GC from bucket
name = bucket_name.split('-')[-1]
return genome_centers_id_from_bucket_array[name]
elif "wgs" in file_path.lower():
# get from name
return name_components[0].lower()
else:
return 'rdr'
def reconcile_feedback_records(self):
records = self.manifest_feedback_dao.get_feedback_reconcile_records()
logging.info('Running feedback records reconciliation')
for record in records:
if record.raw_feedback_count > record.feedbackRecordCount \
and record.raw_feedback_count != record.feedbackRecordCount:
logging.info(f'Updating feedback record count for file path: {record.filePath}')
feedback_record = self.manifest_feedback_dao.get(record.feedback_id)
feedback_record.feedbackRecordCount = record.raw_feedback_count
self.manifest_feedback_dao.update(feedback_record)
    def gc_missing_files_record_clean_up(self, num_days=90):
        """
        Delete resolved missing-file records older than num_days.

        :param num_days: age threshold in days (default 90)
        """
        logging.info('Running missing resolved data files cleanup')
        self.missing_files_dao.remove_resolved_from_days(
            num_days=num_days
        )
def resolve_missing_gc_files(self, limit=800):
logging.info('Resolving missing gc data files')
files_to_resolve = self.missing_files_dao.get_files_to_resolve(limit)
if files_to_resolve:
resolve_arrays = [obj for obj in files_to_resolve if obj.identifier_type == 'chipwellbarcode']
| |
<gh_stars>0
#-*- coding: utf-8 -*-
#tentando importar python 2x ou 3x
try:
#python 2x
from Tkinter import Tk, Frame, Label, Menu, Scrollbar, Toplevel, Entry, Button, StringVar
from tkMessageBox import showinfo
from ttk import Treeview, Style, Button as Buttonttk, Label as Labelttk, Entry as Entryttk, OptionMenu as OptionMenuttk
from functools import partial
#print("python 2x")
except ImportError:
#python 3x
from tkinter import Tk, Frame, Label, Menu, Scrollbar, Toplevel, Entry, Button, StringVar
from tkinter.messagebox import showinfo
from tkinter.ttk import Treeview, Style, Button as Buttonttk, Label as Labelttk, Entry as Entryttk, OptionMenu as OptionMenuttk
from functools import partial
#print("python 3x")
except Exception as e:
print(e)
#folder where the Controle class lives
from sys import path
path.append("../control")
from controle import Controle
class Programa(Tk):
    """Main application window: builds the menu bar and stacks the screens."""
    def __init__(self, **kwargs):
        Tk.__init__(self, **kwargs)
        self.title("1.0")
        self.controle = Controle()
        # Global ttk styling shared by every screen.
        style = Style()
        style.configure("TLabel", font = (None, 18))
        style.configure("TButton", font = (None, 18))
        style.configure("TMenubutton", font = (None, 18))
        style.configure("Treeview", font = (None, 16), rowheight = 30)
        style.configure("Treeview.Heading", font = (None, 18))
        self.createMenubar()
        self.createContainer()

    def createMenubar(self):
        """
        Create the menu bar.
        """
        menubar = Menu(font = (None, 12))
        self.config(menu = menubar)
        menubarMenu = Menu(menubar, tearoff = 0, font = (None, 12))
        menubarMenu.add_command(label = "Jogadores", command = lambda: self.setTela("TelaJogadores"))
        menubarMenu.add_command(label = "Pontos de guild", command = lambda: self.setTela("TelaPontosGuild"))
        menubar.add_cascade(label = "Menu", menu = menubarMenu)
        menubar.add_command(label = "Sobre", command = lambda: showinfo(title = "Sobre", message = "Não sei porque fiz isso.") )

    def createContainer(self):
        """
        Stack every screen in the same grid cell so setTela() can raise one.
        """
        container = Frame(self)
        container.grid_rowconfigure(0, weight = 1)
        container.grid_columnconfigure(0, weight = 1)
        self.telas = {}
        # Instantiate each screen once and keep it, keyed by class name.
        for tela in (TelaJogadores, TelaPontosGuild):
            name = tela.__name__
            instanciaTela = tela(container, controle = self.controle)
            instanciaTela.grid(row = 0, column = 0, sticky = "nswe")
            self.telas[name] = instanciaTela
        self.setTela("TelaJogadores")
        container.pack(fill = "both", expand = True, padx = 5, pady = 5)

    def setTela(self, tela):
        """
        Raise the screen with the given class name to the front.
        """
        self.telas[tela].tkraise()

    def executar(self):
        """
        Set the main-window geometry and enter the Tk main loop.
        """
        self.geometry("650x400")
        self.mainloop()
#################Tela jogadores########################
class TelaJogadores(Frame):
    """
    Players screen: a Treeview listing the players plus add/remove buttons
    and a scrollbar.
    """
    def __init__(self, parent, controle, **kwargs):
        Frame.__init__(self, parent, **kwargs)
        self.controle = controle
        self.createTree()
        self.createButton()
        self.updateTree()

    def createTree(self):
        """
        Create self.tree (the players Treeview) and its scrollbar.
        """
        # create the widgets
        frame = Frame(self)
        self.tree = Treeview(frame)
        scroll = Scrollbar(frame, width = 22, command = self.tree.yview)
        self.tree.configure(yscrollcommand = scroll.set)
        # configure the tree columns (idjogador is kept but hidden)
        self.tree["columns"] = ("idjogador", "nome", "cargo")
        self.tree.heading("nome", text = "Nome")
        self.tree.heading("cargo", text = "Cargo")
        self.tree.column("#0", width = 0, stretch = False)
        self.tree.column("idjogador", width = 0, stretch = False)
        self.tree.column("nome", width = 307, minwidth = 307, stretch = True)
        self.tree.column("cargo", width = 307, minwidth = 307, stretch = True)
        # layout
        frame.pack(fill = "both", expand = True)
        self.tree.pack(side = "left", fill = "both", expand = True)
        scroll.pack(side = "left", fill = "y")

    def createButton(self):
        """
        Create the buttons used to add or remove players.
        """
        # create the buttons
        buttonAdicionar = Buttonttk(self, width = 10, text = "+")
        buttonRemover = Buttonttk(self, width = 10, text = "-")
        # layout
        buttonAdicionar.pack(side = "left", padx = 5, pady = 5)
        buttonRemover.pack(side = "right", padx = 5, pady = 5)
        # commands, invoked when each button is clicked
        buttonAdicionar["command"] = partial(ToplevelJogadorCreate, self)
        buttonRemover["command"] = self.buttonRemoverClick
        # double-clicking a row opens the update dialog
        self.tree.bind("<Double-Button-1>", lambda event: self.updateJogador())

    def updateJogador(self, *args):
        """
        Open the update dialog for the selected player.
        """
        # BUG FIX: guard against double-clicks with nothing selected;
        # tree.selection()[0] used to raise IndexError on an empty selection
        # (buttonRemoverClick already guards the same way).
        selecionados = self.tree.selection()
        if not selecionados:
            return
        # read the values of the selected item
        itens = self.tree.item(selecionados[0])
        idjogador = itens["values"][0]
        nomeAntigo = itens["values"][1]
        cargoAntigo = itens["values"][2]
        # open the update dialog
        ToplevelJogadorUpdate(self, cargoDefault = cargoAntigo, nome = nomeAntigo, idjogador = idjogador)

    def buttonRemoverClick(self):
        # only remove when something is actually selected
        itens = self.tree.selection()
        if itens:
            for item in itens:
                idjogador = self.tree.item(item)["values"][0]
                self.controle.deleteJogador(idjogador)
            self.updateTree()

    def updateTree(self):
        """
        Reload self.tree with every row of the 'jogadores' table.
        """
        self.tree.delete(*self.tree.get_children())
        valores = self.controle.readAllJogador()
        for valor in valores:
            idjogador = valor[0]
            nome = valor[1]
            cargo = valor[2]
            self.tree.insert("", "end", values = (idjogador, nome, cargo))

    def getTree(self):
        """
        Return self.tree.
        """
        return self.tree

    def getControle(self):
        """Return the controller shared with the dialogs."""
        return self.controle
#abstract class
#should not be instantiated directly, only subclassed
class ToplevelJogador(Toplevel):
    """
    Abstract base dialog for creating/updating a player.

    Builds the name entry, the role option menu, the warning label and the
    action button; subclasses set the button text and command.
    """
    def __init__(self, parent, cargoDefault, nome, **kwargs):
        Toplevel.__init__(self, **kwargs)
        # parent screen (TelaJogadores)
        self.parent = parent
        # place the dialog over the parent window
        x = parent.winfo_rootx()
        y = parent.winfo_rooty()
        self.geometry("650x150+{}+{}".format(x, y))
        self.title("Jogador(a)")
        # top frame
        frameTop = Frame(self)
        # create the label & entry
        label = Labelttk(frameTop, text = "Nome")
        self.entry = Entryttk(frameTop, font = (None, 18))
        self.entry.insert("end", nome)
        self.entry.focus()
        # create the role menu (role name -> database id)
        self.dicionarioCargos = {
            "novato": 1, "explorador": 2, "herdeiro": 3, "lider": 4
        }
        self.stringMenu = StringVar()
        self.menu = OptionMenuttk(frameTop, self.stringMenu, cargoDefault, *[cargo for cargo in self.dicionarioCargos.keys()])
        self.menu["width"] = 11
        # create the warning label
        self.labelAviso = Labelttk(self)
        # create the action button (text/command set by subclasses)
        self.button = Buttonttk(self, text = "B", width = 20)
        # layout
        frameTop.pack(fill = "x", padx = 5, pady = 5)
        label.pack(side = "left")
        self.entry.pack(side = "left", fill = "x", padx = 2)
        self.menu.pack(side = "left")
        self.labelAviso.pack(padx = 5, pady = 5)
        self.button.pack(padx = 5, pady = 5)
        # bindings
        label.bind("<Double-Button-1>", lambda event: self.entry.focus())
        self.entry.bind("<Key>", lambda event: self.clearLabelAviso())

    def clearLabelAviso(self, *args):
        """
        Clear any warning message as soon as the user types in the entry.
        """
        if self.labelAviso["text"]:
            self.labelAviso["text"] = ""

    def validar(self):
        """
        Validate the form: non-empty name/role and a name not already in use.
        Shows a warning and returns False on failure, True otherwise.
        """
        nome = self.entry.get().strip().lower()
        cargo = self.dicionarioCargos[self.stringMenu.get()]
        # if the name and role fields are valid
        if nome and cargo:
            # duplicate name
            if self.parent.controle.readJogadorNome(nome):
                self.labelAviso["text"] = "Jogador(a) já existe."
                return False
            # name is not a duplicate
            return True
        # name/role fields invalid
        self.labelAviso["text"] = "Campos inválidos."
        return False
class ToplevelJogadorCreate(ToplevelJogador):
    """
    Dialog for entering the name and role of a new player.
    """
    def __init__(self, parent, cargoDefault = "novato", nome = "", **kwargs):
        ToplevelJogador.__init__(self, parent, cargoDefault, nome, **kwargs)
        # set the button text
        self.button["text"] = "Adicionar"
        # hook up the click handler (button and <Return> in the entry)
        self.button["command"] = self.buttonClick
        self.entry.bind("<Return>", lambda event: self.buttonClick())

    def buttonClick(self, *args):
        """
        Insert the new player into the database and refresh the tree.
        """
        controle = self.parent.getControle()
        # Cleanup: removed an unused `tree` local and a redundant second
        # .strip() on the already-stripped name.
        nome = self.entry.get().strip().lower()
        cargo = self.dicionarioCargos[self.stringMenu.get()]
        # if the fields are valid
        if nome and cargo:
            # reject duplicate names
            if controle.readJogadorNome(nome):
                self.labelAviso["text"] = "Erro, jogador(a) já existe."
            else:
                # insert the new player into the database table
                controle.createJogador((nome, cargo))
                # refresh the tree
                self.parent.updateTree()
                # clear the entry field
                self.entry.delete(0, "end")
                self.labelAviso["text"] = "Adicionado com sucesso."
        # invalid fields
        else:
            self.labelAviso["text"] = "Campos inválidos."
class ToplevelJogadorUpdate(ToplevelJogador):
    """
    Dialog for updating an existing player's name and/or role.
    """
    def __init__(self, parent, cargoDefault = "novato", nome = "", idjogador = -1, **kwargs):
        ToplevelJogador.__init__(self, parent, cargoDefault, nome, **kwargs)
        # set the button text
        self.button["text"] = "Atualizar"
        # keep the old name to detect whether the name itself changed
        self.nomeAntigo = nome
        # database id identifying the player being updated
        self.idjogador = idjogador
        # hook up the click handler (button and <Return> in the entry)
        self.button["command"] = self.buttonClick
        self.entry.bind("<Return>", lambda event: self.buttonClick())

    def buttonClick(self, *args):
        """
        Persist the player's new data and refresh the tree.
        """
        # Cleanup: removed an unused `tree` local.
        controle = self.parent.getControle()
        # read the fields
        nome = self.entry.get().strip().lower()
        cargo = self.dicionarioCargos[self.stringMenu.get()]
        # name unchanged: only the role may have changed
        if nome == self.nomeAntigo:
            controle.updateJogador((nome, cargo, self.idjogador))
            self.labelAviso["text"] = "Atualizado."
            self.parent.updateTree()
        # name changed
        else:
            # reject a new name that already exists
            if controle.readJogadorNome(nome):
                self.labelAviso["text"] = "Erro, usuario ja existe."
            # new name is valid
            else:
                controle.updateJogador((nome, cargo, self.idjogador))
                self.parent.updateTree()
                self.labelAviso["text"] = "Atualizado."
#################Tela pontos de guild########################
class TelaPontosGuild(Frame):
    def __init__(self, parent, controle, **kwargs):
        Frame.__init__(self, parent, **kwargs)
        self.controle = controle
        self.createTreeData()
        self.createTreeGp()
        # NOTE: method name spelled "updataTreeData" where it is defined.
        self.updataTreeData()
        # white background for the screen
        self["background"] = "white"
    def createTreeData(self):
        """
        Build the dates pane: a Treeview of dates with a scrollbar and
        add/remove buttons.
        """
        # style specific to this tree
        style = Style()
        style.configure("data.Treeview.Heading", font = (None, 12))
        # create the widgets
        mainFrame = Frame(self, width = 200)
        topFrame = Frame(mainFrame)
        bottomFrame = Frame(mainFrame, height = 50)
        # create the tree and the scrollbar
        self.treeData = Treeview(topFrame, style = "data.Treeview")
        scroll = Scrollbar(topFrame, width = 30, command = self.treeData.yview)
        self.treeData.configure(yscrollcommand = scroll.set)
        # configure the columns (iddata is kept but hidden)
        self.treeData["columns"] = ("iddata", "data")
        self.treeData.heading("iddata", text = "iddata")
        self.treeData.heading("data", text = "Data")
        self.treeData.column("#0", width = 0, stretch = False)
        self.treeData.column("iddata", width = 0, stretch = False)
        self.treeData.column("data", width = 170, minwidth = 170, stretch = True)
        # create the buttons
        buttonAdicionar = Buttonttk(bottomFrame, text = "+", width = 5)
        buttonRemover = Buttonttk(bottomFrame, text = "-", width = 5)
        # layout
        mainFrame.pack(side = "left", fill = "y", padx = 5, pady = 5)
        mainFrame.pack_propagate(False)
        topFrame.pack(fill = "both", expand = True, pady = 0)
        bottomFrame.pack(fill = "x", pady = 5)
        self.treeData.pack(side = "left", fill = "y")
        scroll.pack(side = "left", fill = "y")
        buttonAdicionar.pack(side = "left")
        buttonRemover.pack(side = "right")
        # commands
        # open a ToplevelData dialog whenever buttonAdicionar is clicked
        buttonAdicionar["command"] = lambda parent = self: ToplevelData(parent)
        # call removerData whenever buttonRemover is clicked
        buttonRemover["command"] = self.removerData
        # refresh the gp tree when a date row is double-clicked
        self.treeData.bind("<Double-Button-1>", self.updateTreeGp)
    def createTreeGp(self):
        """
        Build the guild-points pane: a Treeview of per-player gp values.
        """
        # create the widgets
        frame = Frame(self)
        # create the tree and the scrollbar
        self.treeGp = Treeview(frame, style = "data.Treeview")
        scroll = Scrollbar(frame, command = self.treeGp.yview, width = 22)
        self.treeGp.configure(yscrollcommand = scroll.set)
        # configure the columns (id_data is kept but hidden)
        self.treeGp["columns"] = ("id_data", "nome", "cargo", "gp")
        self.treeGp.heading("id_data", text = "id_data")
        self.treeGp.heading("nome", text = "nome")
        self.treeGp.heading("cargo", text = "cargo")
        self.treeGp.heading("gp", text = "gp")
        self.treeGp.column("#0", width = 0, stretch = False)
        self.treeGp.column("id_data", width = 0, stretch = False)
        self.treeGp.column("nome", width = 100 )
        self.treeGp.column("cargo", width = 60)
        self.treeGp.column("gp", width = 75)
        # layout
        frame.pack(side = "left", fill = "both", expand = True, padx = 5, pady = 5)
        self.treeGp.pack(side = "left", fill = "both", expand = True)
        scroll.pack(side = "left", fill = "y")
        # bindings
        # open a ToplevelGp dialog when a player row is double-clicked
        self.treeGp.bind("<Double-Button-1>", lambda event, parent = self: ToplevelGp(parent))
def adicionarNovaData(self, dia, mes, ano):
"""
inserir a nova data na tabela.
preencher a tabela gp.
atualizar a tree que mostra a data.
atualizar a tree que mostra o gp.
"""
#formatando a data para inserir no banco
data = "/".join([ano, mes, dia])
#inserindo na tabela data
id_data = self.controle.createData(data)
#preenchendo a tabela gp | |
<gh_stars>1-10
from enum import auto, Enum
import logging
import pathlib
from .yaml import load_yaml
import tomli
from .cache import Cache
from .signals import document_loaded
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
List,
Literal,
Optional,
Type,
TypeVar,
Union,
)
import re
import dateparser
T = TypeVar('T')
class Publisher:
    """A publisher produces the final output files, applying templates etc. as
    needed.

    This base class only defines the interface: every ``publish_*`` method is
    a no-op stub (implicitly returning ``None`` despite the annotation) and is
    expected to be overridden by a concrete publisher implementation.
    """
    def publish_document(self, document: 'DocumentNode') -> pathlib.Path:
        """Publish a document node.

        :return: The path of the generated file.
        """
        pass
    def publish_index(self, index: 'IndexNode') -> pathlib.Path:
        """Publish an index node.

        :return: The path of the generated file."""
        pass
    def publish_resource(self, resource: 'ResourceNode') -> pathlib.Path:
        """Publish a resource node.

        :return: The path of the generated file."""
        pass
    def publish_static(self, static: 'StaticNode') -> pathlib.Path:
        """Publish a static node.

        :return: The path of the generated file."""
        pass
    def publish_generated(self, generated: 'GeneratedNode') -> pathlib.Path:
        """Publish a generated node.

        :return: The path of the generated file."""
        pass
class NodeKind(Enum):
    """Discriminator for the concrete type of a :py:class:`Node`."""
    Resource = auto()
    Index = auto()
    Document = auto()
    Data = auto()
    Static = auto()
    Generated = auto()
class Node:
    """Base class for every node in the site tree.

    Provides the parent/child bookkeeping shared by all concrete node types;
    subclasses must set :py:attr:`kind` and usually populate ``content``.
    """
    kind: NodeKind
    """The node kind, must be set in the constructor."""
    src: Optional[pathlib.Path]
    """The full path to the source file.
    This is an OS specific path object."""
    path: pathlib.PurePosixPath
    """The output path, relative to the page root.
    All paths *must* start with ``/``.
    """
    metadata: Dict[str, Any]
    """Metadata associated with this node."""
    parent: Optional['Node']
    """The parent node, if any."""
    __nodes: Dict[str, 'Node']
    """A dictionary containing all child nodes.
    The key is the path to the child node relative to this node. I.e. if the
    path of this node is ``/foo``, and it has a child at ``/foo/bar``, the
    key for that child would be ``bar``."""
    @property
    def children(self):
        """A list containing all direct children of this node."""
        return self.__nodes.values()
    def __init__(self) -> None:
        self.__nodes = {}
        self.metadata = {}
        self.parent = None
    def add_child(self, child: 'Node') -> None:
        """Add a new child to this node.
        The path of the child node must be a sub-path of the current node
        path, with exactly one more component. I.e. if the current node path is
        ``/foo/bar``, a node with path ``/foo/bar/baz`` can be added as a
        child, but ``/baz/`` or ``/foo/bar/boo/baz`` would be invalid."""
        # NOTE(review): only path inequality is asserted; the documented
        # "exactly one more component" contract is not enforced, and
        # ``assert`` disappears under ``python -O`` -- consider an explicit
        # check if callers outside this package can reach this.
        assert self.path != child.path
        name = child.path.relative_to(self.path).parts[0]
        self.__nodes[name] = child
        child.parent = self
    def __repr__(self):
        return f'{self.__class__.__name__}({self.path})'
    def select_children(self) -> 'Query':
        """Select all children of this node and return them as a
        :py:class:`~liara.query.Query`."""
        from .query import Query
        return Query(self.children)
    def get_child(self, name: str) -> Optional['Node']:
        """Get a child of this node.
        :return: The child node or ``None`` if no such child exists."""
        return self.__nodes.get(name)
    def get_children(self, *, recursive=False) -> Iterable['Node']:
        """Get all children of this node.
        This function differs from :py:meth:`select_children` in two important
        ways:
        * It returns a list of :py:class:`Node` instances and does not wrap it
          in a :py:class:`~liara.query.Query`
        * It can enumerate all children recursively.
        """
        # Generator: yields each direct child, then (optionally) descends.
        for child in self.children:
            yield child
            if recursive:
                yield from child.get_children(recursive=True)
    def process(self, cache: Cache) -> None:
        """Some nodes -- resources, documents, etc. need to be processed. As
        this can be a resource-intense process (for instance, it may require
        generating images), processing can cache results and has to be
        called separately instead of being executed as part of some other
        operation.
        By convention this method should populate ``self.content``.
        """
        pass
_metadata_marker = re.compile(r'(---|\+\+\+)\n')


class MetadataKind(Enum):
    """The serialization format of a metadata (front matter) block."""
    Unknown = auto()
    Yaml = auto()
    Toml = auto()


def extract_metadata_content(text: str):
    """Extract metadata and content.

    Metadata is stored at the beginning of the file, separated using a metadata
    separation marker, for instance::

        +++
        this_is_toml = True
        +++
        content

    This function splits the provided text into metadata and actual content.

    :return: A ``(metadata, content)`` tuple. When no *complete* metadata
        block opens at the very beginning of the text, the metadata is an
        empty dictionary and the whole text is returned as content.
    """
    meta_start, meta_end = 0, 0
    content_start, content_end = 0, 0
    metadata_kind = MetadataKind.Unknown
    # If the document doesn't end with a trailing new-line, the metadata regex
    # will get confused. We'll thus add a new-line to make sure this works
    if text and text[-1] != '\n':
        text += '\n'
    for match in _metadata_marker.finditer(text):
        if metadata_kind == MetadataKind.Unknown:
            # Front matter must open at offset 0. Previously, a marker found
            # anywhere (e.g. a "---" thematic break in the middle of the
            # document) was mistaken for a metadata block, silently dropping
            # everything before it.
            if match.start() != 0:
                break
            if match.group() == '---\n':
                metadata_kind = MetadataKind.Yaml
            elif match.group() == '+++\n':
                metadata_kind = MetadataKind.Toml
            meta_start = match.end()
        else:
            # The two mismatch messages below were swapped in the original
            # code (they described the opposite of what happened).
            if match.group() == '---\n':
                if metadata_kind != MetadataKind.Yaml:
                    raise Exception('Metadata markers mismatch -- started '
                                    'with "+++", but ended with "---"')
            elif match.group() == '+++\n':
                if metadata_kind != MetadataKind.Toml:
                    raise Exception('Metadata markers mismatch -- started '
                                    'with "---", but ended with "+++"')
            meta_end = match.start()
            content_start = match.end()
            content_end = len(text)
            break
    if meta_end == 0:
        # Either no metadata was found, or an opening marker was never
        # closed. Treat everything as content -- a dangling opening marker
        # previously yielded empty metadata *and* empty content, losing the
        # whole document.
        return {}, text
    if metadata_kind == MetadataKind.Yaml:
        metadata = load_yaml(text[meta_start:meta_end])
    else:
        metadata = tomli.loads(text[meta_start:meta_end])
    content = text[content_start:content_end]
    return metadata, content
def fixup_relative_links(document: 'DocumentNode'):
    """Replace relative links in the document with links relative to the
    site root.

    Mutates ``document.content`` in place; when at least one relative link is
    found the whole HTML tree is re-serialized via BeautifulSoup.
    """
    # early out if there's no relative link in here, as the parsing is
    # very expensive
    # NOTE(review): this fast path only spots double-quoted attributes
    # (href="./..); a single-quoted relative href would be skipped. Confirm
    # that upstream content always uses double quotes.
    if "href=\"." not in document.content:
        return
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(document.content, 'lxml')
    def is_relative_url(s):
        # "Relative" here means the href starts with '.' (./ or ../).
        return s and s[0] == '.'
    for link in soup.find_all('a', {'href': is_relative_url}):
        target = link.attrs.get('href')
        # Resolve against the document's parent directory (a pure POSIX
        # path, i.e. the site-root-relative output path).
        link.attrs['href'] = \
            str(document.path.parent / pathlib.PurePosixPath(target))
    document.content = str(soup)
def fixup_date(document: 'DocumentNode'):
    """If the date in the document is a string, try to parse it to produce a
    datetime object.

    If parsing fails, the original string is left in place instead of being
    silently replaced with ``None``.
    """
    date = document.metadata.get('date')
    if isinstance(date, str):
        parsed = dateparser.parse(date)
        # dateparser.parse returns None for unparseable input; keep the
        # original value in that case so downstream code can report it.
        if parsed is not None:
            document.metadata['date'] = parsed
class FixupDateTimezone:
    """Set the timezone of the ``metadata['date']`` field to the local timezone
    if no timezone has been set."""
    def __init__(self):
        import tzlocal
        # Cached local timezone, applied to naive datetimes in __call__.
        self.__tz = tzlocal.get_localzone()
    def __call__(self, document: 'DocumentNode'):
        '''If the date in the document has no timezone info, set it to the
        local timezone.

        Values without a ``tzinfo`` attribute (e.g. a date string that could
        not be parsed upstream) are left untouched instead of raising
        ``AttributeError``.'''
        if 'date' in document.metadata:
            date = document.metadata['date']
            # getattr guard: only datetime-like objects expose tzinfo; for a
            # raw string the default (False) is returned and nothing happens.
            if getattr(date, 'tzinfo', False) is None:
                document.metadata['date'] = date.replace(tzinfo=self.__tz)
class DocumentNode(Node):
    """A node backed by a source file whose text is loaded, fixed up, and
    then processed into ``self.content``."""
    _load_fixups: List[Callable]
    """These functions are called right after the document has been loaded,
    and can be used to fixup metadata, content, etc. before it gets processed
    (These should be called before :py:meth:`load`/:py:meth:`reload`
    returns.)"""
    _process_fixups: List[Callable]
    """These functions are called after a document has been processed
    (These should be called before :py:meth:`process` returns)."""
    def __init__(self, src, path, metadata_path=None):
        super().__init__()
        self.kind = NodeKind.Document
        self.src = src
        self.path = path
        # Optional side-car file; when set, metadata is read from it and
        # ``src`` is treated as pure content (see _load()).
        self.metadata_path = metadata_path
        # Populated by process(); None until then.
        self.content = None
        self._load_fixups = []
        self._process_fixups = []
    def set_fixups(self, *, load_fixups, process_fixups) -> None:
        """Set the fixups that should be applied to this document node.
        The fixups should be set *before* calling :py:meth:`load`.
        :param load_fixups: These functions will be executed before
            :py:meth:`load` returns.
        :param process_fixups: These functions will be executed before
            :py:meth:`process` returns.
        """
        self._load_fixups = load_fixups
        self._process_fixups = process_fixups
    def load(self):
        """Load the content of this node."""
        self._load()
        self._apply_load_fixups()
        # Notify subscribers with the freshly loaded, pre-processing text.
        document_loaded.send(self, document=self, content=self._raw_content)
    def validate_metadata(self):
        """Check that metadata exists and contains a 'title' entry.

        :raises Exception: if metadata is missing or has no title."""
        if self.metadata is None:
            raise Exception(f"No metadata for document: '{self.src}'")
        if 'title' not in self.metadata:
            raise Exception(f"'title' missing for Document: '{self.src}'")
    def _apply_load_fixups(self):
        # Run the registered load fixups in registration order.
        for fixup in self._load_fixups:
            fixup(self)
    def _apply_process_fixups(self):
        # Run the registered process fixups in registration order.
        for fixup in self._process_fixups:
            fixup(self)
    def _load(self):
        if self.metadata_path:
            # Metadata lives in a separate YAML file; src is pure content.
            self.metadata = load_yaml(self.metadata_path.read_text())
            self._raw_content = self.src.read_text('utf-8')
        else:
            # Metadata is embedded as front matter in the source file.
            self.metadata, self._raw_content = \
                extract_metadata_content(self.src.read_text('utf-8'))
    def reload(self):
        """Reload this node from disk.
        By default, this just forwards to :py:meth:`_load`.
        """
        self._load()
        self._apply_load_fixups()
    def publish(self, publisher: Publisher) -> pathlib.Path:
        """Publish this node using the provided publisher."""
        return publisher.publish_document(self)
class HtmlDocumentNode(DocumentNode):
    """A node representing a Html document.

    The raw content is already HTML, so processing is a straight copy plus
    the registered process fixups; the cache is unused.
    """
    def process(self, cache: Cache):
        self.content = self._raw_content
        self._apply_process_fixups()
class MarkdownDocumentNode(DocumentNode):
    """A node representing a Markdown document.

    :py:meth:`process` converts the raw Markdown to HTML and caches the
    fixed-up result, keyed on both the content and the node's path.
    """
    def process(self, cache: Cache):
        import markdown
        from .md import HeadingLevelFixupExtension
        import hashlib
        byte_content = self._raw_content.encode('utf-8')
        hasher = hashlib.sha256(byte_content)
        # The cached value is stored *after* the process fixups ran, and
        # fixups such as relative-link rewriting depend on the node's path.
        # The key must therefore include the path -- previously, two
        # documents with identical source but different paths shared one
        # (wrong) fixed-up output.
        hasher.update(str(self.path).encode('utf-8'))
        content_hash = hasher.digest()
        # Compare against None so a legitimately empty cached document does
        # not trigger a pointless re-conversion.
        if (content := cache.get(content_hash)) is not None:
            self.content = content
            return
        extensions = [
            'pymdownx.arithmatex',
            HeadingLevelFixupExtension(),
            'fenced_code',
            'codehilite',
            'smarty',
            'tables',
            'admonition'
        ]
        extension_configs = {
            'codehilite': {
                'css_class': 'code'
            },
            'pymdownx.arithmatex': {
                'generic': True
            }
        }
        self.content = markdown.markdown(self._raw_content,
                                         extensions=extensions,
                                         extension_configs=extension_configs)
        self._apply_process_fixups()
        # Store the fixed-up result; the cache-hit path above can then skip
        # the fixups entirely.
        cache.put(content_hash, self.content)
class DataNode(Node):
    """A data node.

    Data nodes consist of a dictionary. This can be used to store arbitrary
    data as part of a :py:class:`liara.site.Site`, and make it available to
    templates (for instance, a menu structure could go into a data node.)
    """
    def __init__(self, src, path):
        super().__init__()
        self.kind = NodeKind.Data
        self.src = src
        self.path = path
        # Read the file contents and close the file immediately -- the
        # previous implementation handed an open file object to the YAML
        # loader and never closed it, leaking the descriptor. This also
        # matches how DocumentNode._load reads its sources.
        self.content = load_yaml(self.src.read_text())
class IndexNode(Node):
"""An | |
replicate to all lanes of three registers"""
logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LD4R(cpu_context: ProcessorContext, instruction: Instruction):
    """Load single 4-element structure and replicate to all lanes of four registers"""
    # Stub: logs and returns, leaving all emulated state unchanged.
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Compare and Swap
@opcode
def CAS(cpu_context: ProcessorContext, instruction: Instruction):
    """Compare and swap"""
    # Stub: logs and returns, leaving all emulated state unchanged.
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def CASP(cpu_context: ProcessorContext, instruction: Instruction):
    """Compare and swap pair"""
    # Stub: logs and returns, leaving all emulated state unchanged.
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Atomic memory operations
# NOTE: every handler in this region is a stub -- the atomic
# read-modify-write semantics are not emulated. Each one only logs that the
# instruction was skipped and leaves registers and memory untouched.
@opcode
def LDADD(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic add"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDCLR(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic bit clear"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDEOR(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic exclusive OR"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDSET(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic bit set"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDMAX(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic signed maximum"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDMIN(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic signed minimum"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDUMAX(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic unsigned maximum"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def LDUMIN(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic unsigned minimum"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STADD(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic add, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STCLR(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic bit clear, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STEOR(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic exclusive OR, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STSET(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic bit set, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STMAX(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic signed maximum, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STMIN(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic signed minimum, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STUMAX(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic unsigned maximum, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def STUMIN(cpu_context: ProcessorContext, instruction: Instruction):
    """Atomic unsigned minimum, without return"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Swap
@opcode
def SWP(cpu_context: ProcessorContext, instruction: Instruction):
    """Swap: the destination register receives the memory operand's value,
    and memory receives the source register's value."""
    operands = instruction.operands
    # Read both sides up front so the two writes below cannot interfere.
    source_value = operands[1].value
    memory_value = operands[2].value
    logger.debug("Swap %s %s %s", operands[0].text, operands[1].text, operands[2].text)
    operands[0].value = memory_value
    operands[2].value = source_value
#endregion
#region Arithmetic (immediate)
@opcode("add")
@opcode("adc")
def ADD(cpu_context: ProcessorContext, instruction: Instruction):
"""
Handle both ADC and ADD here since the only difference is the flags.
"""
operands = instruction.operands
term_1 = operands[-2].value
term_2 = operands[-1].value
result = term_1 + term_2
if instruction.root_mnem.startswith("adc"):
result += cpu_context.registers.c
width = get_max_operand_size(operands)
mask = utils.get_mask(width)
if instruction.flag_update:
cpu_context.registers.c = int(result > mask)
cpu_context.registers.z = int(result & mask == 0)
cpu_context.registers.n = utils.sign_bit(result, width)
cpu_context.registers.v = int(utils.sign_bit(~(term_1 ^ term_2) & (term_2 ^ result), width) == 0)
cpu_context.jcccontext.update_flag_opnds(["c", "z", "n", "v"], operands)
result = result & mask
logger.debug("0x%X + 0x%X = 0x%X", term_1, term_2, result)
operands[0].value = result
# TODO: Due to simplification, it may be better to just keep the opcodes separate.
@opcode("sub")
@opcode("sbc")
@opcode("rsb")
@opcode("rsc")
def SUB(cpu_context: ProcessorContext, instruction: Instruction):
"""Subtract"""
operands = instruction.operands
term_1 = operands[1].value
term_2 = operands[2].value
if instruction.mnem.startswith("r"): # reverse subtract
term_1, term_2 = term_2, term_1
result = term_1 - term_2
if instruction.mnem.startswith(("sbc", "rsc")):
result -= cpu_context.registers.c ^ 1
if instruction.flag_update:
width = get_max_operand_size(operands)
mask = utils.get_mask(width)
cpu_context.registers.c = int((term_1 & mask) < (term_2 & mask))
cpu_context.registers.z = int(result & mask == 0)
cpu_context.registers.n = utils.sign_bit(result, width)
cpu_context.registers.v = int(utils.sign_bit((term_1 ^ term_2) & (term_1 ^ result), width) == 0)
cpu_context.jcccontext.update_flag_opnds(["c", "z", "n", "v"], operands)
logger.debug("0x%X - 0x%X = 0x%X", term_1, term_2, result)
operands[0].value = result
@opcode
def CMP(cpu_context: ProcessorContext, instruction: Instruction):
    """Compare: set flags for `term_1 - term_2` without storing the result."""
    operands = instruction.operands
    term_1 = operands[0].value
    term_2 = operands[1].value
    result = term_1 - term_2
    width = get_max_operand_size(operands)
    # Flags are always updated for CMP
    mask = utils.get_mask(width)
    cpu_context.registers.c = int((term_1 & mask) < (term_2 & mask))
    cpu_context.registers.z = int(result & mask == 0)
    cpu_context.registers.n = utils.sign_bit(result, width)
    # Signed overflow for subtraction: operands differ in sign and the
    # result's sign differs from the minuend's. The previous `== 0`
    # comparison stored the inverted flag.
    cpu_context.registers.v = int(utils.sign_bit((term_1 ^ term_2) & (term_1 ^ result), width) != 0)
    cpu_context.jcccontext.update_flag_opnds(["c", "z", "n", "v"], operands)
    logger.debug("0x%X <-> 0x%X = 0x%X", term_1, term_2, result)
@opcode
def CMN(cpu_context: ProcessorContext, instruction: Instruction):
    """Compare negative: set flags for `value_a + value_b` without storing."""
    operands = instruction.operands
    # CMN takes two operands (like CMP). The previous [1]/[2] indexing was
    # copied from the three-operand arithmetic handlers and raised
    # IndexError on a real `cmn rn, op2` instruction.
    value_a = operands[0].value
    value_b = operands[1].value
    result = value_a + value_b
    width = get_max_operand_size(operands)
    mask = utils.get_mask(width)
    cpu_context.registers.c = int(result > mask)
    cpu_context.registers.z = int(result & mask == 0)
    cpu_context.registers.n = utils.sign_bit(result, width)
    # V is set when a signed overflow occurred (same-sign terms, differing
    # result sign); the previous `== 0` comparison stored the inverse.
    cpu_context.registers.v = int(utils.sign_bit(~(value_a ^ value_b) & (value_b ^ result), width) != 0)
    cpu_context.jcccontext.update_flag_opnds(["c", "z", "n", "v"], operands)
    logger.debug("0x%X <-> 0x%X = 0x%X", value_a, value_b, result)
#endregion
#region Logical (immediate)
@opcode
def AND(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitwise AND"""
    operands = instruction.operands
    # Negative indexing (as in the ADD handler) supports both the two-operand
    # form (`and rd, op2`) and the three-operand form (`and rd, rn, op2`);
    # the previous [1]/[2] indexing raised IndexError on the former.
    opvalue2 = operands[-2].value
    opvalue3 = operands[-1].value
    result = opvalue2 & opvalue3
    if instruction.flag_update:
        width = get_max_operand_size(operands)
        cpu_context.registers.z = int(result == 0)
        cpu_context.registers.n = utils.sign_bit(result, width)
        cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    logger.debug("0x%X & 0x%X = 0x%X", opvalue2, opvalue3, result)
    operands[0].value = result
@opcode
def TST(cpu_context: ProcessorContext, instruction: Instruction):
    """Test bits (same as ANDS, but result is discarded)"""
    # The @opcode decorator was missing, so this handler was never
    # registered and `tst` instructions fell through unhandled.
    operands = instruction.operands
    # TST takes two operands (like CMP); the previous [1]/[2] indexing was
    # copied from the three-operand AND handler and raised IndexError.
    opvalue1 = operands[0].value
    opvalue2 = operands[1].value
    result = opvalue1 & opvalue2
    width = get_max_operand_size(operands)
    # Flags are always updated for TST.
    cpu_context.registers.z = int(result == 0)
    cpu_context.registers.n = utils.sign_bit(result, width)
    cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    logger.debug("0x%X & 0x%X = 0x%X", opvalue1, opvalue2, result)
@opcode
def EOR(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitwise exclusive OR"""
    operands = instruction.operands
    # Negative indexing (as in the ADD handler) supports both the two-operand
    # and three-operand encodings; [1]/[2] raised IndexError on the former.
    opvalue2 = operands[-2].value
    opvalue3 = operands[-1].value
    result = opvalue2 ^ opvalue3
    if instruction.flag_update:
        width = get_max_operand_size(operands)
        cpu_context.registers.z = int(result == 0)
        cpu_context.registers.n = utils.sign_bit(result, width)
        cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    logger.debug("0x%X ^ 0x%X = 0x%X", opvalue2, opvalue3, result)
    operands[0].value = result
@opcode
def TEQ(cpu_context: ProcessorContext, instruction: Instruction):
    """Test Equivalence (same as EORS, except the result is discarded)"""
    operands = instruction.operands
    # TEQ takes two operands (like CMP); the previous [1]/[2] indexing was
    # copied from the three-operand EOR handler and raised IndexError.
    opvalue1 = operands[0].value
    opvalue2 = operands[1].value
    result = opvalue1 ^ opvalue2
    width = get_max_operand_size(operands)
    # Flags are always updated for TEQ.
    cpu_context.registers.z = int(result == 0)
    cpu_context.registers.n = utils.sign_bit(result, width)
    cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    logger.debug("0x%X ^ 0x%X = 0x%X", opvalue1, opvalue2, result)
@opcode
def ORR(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitwise inclusive OR"""
    operands = instruction.operands
    # Negative indexing (as in the ADD handler) supports both the two-operand
    # and three-operand encodings; [1]/[2] raised IndexError on the former.
    opvalue2 = operands[-2].value
    opvalue3 = operands[-1].value
    result = opvalue2 | opvalue3
    if instruction.flag_update:
        width = get_max_operand_size(operands)
        cpu_context.registers.z = int(result == 0)
        cpu_context.registers.n = utils.sign_bit(result, width)
        cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    logger.debug("0x%X | 0x%X = 0x%X", opvalue2, opvalue3, result)
    operands[0].value = result
#endregion
#region Move (wide immediate)
@opcode("mov")
@opcode("movz")
@opcode("cpy")
def MOV(cpu_context: ProcessorContext, instruction: Instruction):
"""Move wide with zero"""
operands = instruction.operands
result = operands[1].value
if instruction.flag_update:
width = get_max_operand_size(operands)
cpu_context.registers.z = int(result == 0)
cpu_context.registers.n = utils.sign_bit(result, width)
cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
operands[0].value = result
@opcode
def MOVN(cpu_context: ProcessorContext, instruction: Instruction):
    """Move wide with NOT"""
    operands = instruction.operands
    width = get_max_operand_size(operands)
    # Python's ~ produces a negative value of unbounded width; mask the
    # complement down to the operand width so the stored value (and the
    # sign/zero flags) match the hardware result. Previously a negative
    # Python int was written to the destination.
    result = ~operands[1].value & utils.get_mask(width)
    if instruction.flag_update:
        cpu_context.registers.z = int(result == 0)
        cpu_context.registers.n = utils.sign_bit(result, width)
        cpu_context.jcccontext.update_flag_opnds(["z", "n"], operands)
    operands[0].value = result
@opcode
def MOVK(cpu_context: ProcessorContext, instruction: Instruction):
    """Move wide with keep"""
    operands = instruction.operands
    # TODO: Is it always lsl?
    # Clear only the 16-bit lane selected by the immediate's shift amount,
    # then merge the immediate into that lane, keeping the other lanes of
    # the destination intact.
    # NOTE(review): assumes operands[1].value is already shifted into
    # position (it is OR'd in without shifting) -- confirm against the
    # operand implementation.
    shift_mask = {
        0: 0xFFFFFFFFFFFF0000,
        16: 0xFFFFFFFF0000FFFF,
        32: 0xFFFF0000FFFFFFFF,
        48: 0x0000FFFFFFFFFFFF,
    }[operands[1].shift_count]
    operands[0].value = (operands[0].value & shift_mask) | operands[1].value
#endregion
#region PC-relative address calculation
@opcode
def ADRP(cpu_context: ProcessorContext, instruction: Instruction):
    """Compute address of 4KB page at a PC-relative offset"""
    operands = instruction.operands
    # The page base is the PC with its low 12 bits cleared.
    page_base = cpu_context.registers.pc & 0xFFFFFFFFFFFFF000
    page_offset = operands[1].value
    address = page_base + page_offset * 0x1000
    logger.debug("0x%X + 0x1000*0x%X = 0x%X", page_base, page_offset, address)
    operands[0].value = address
@opcode
def ADR(cpu_context: ProcessorContext, instruction: Instruction):
    """Compute address of label at a PC-relative offset."""
    operands = instruction.operands
    offset = operands[1].value
    address = cpu_context.registers.pc + offset
    logger.debug("0x%X + 0x%X = 0x%X", cpu_context.registers.pc, offset, address)
    operands[0].value = address
#endregion
#region Bitfield move
# NOTE: every handler below (bitfield moves, inserts/extracts, and EXTR) is
# a stub -- each one only logs that the instruction was skipped and leaves
# registers and memory untouched.
@opcode
def BFM(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitfield move"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def SBFM(cpu_context: ProcessorContext, instruction: Instruction):
    """Signed bitfield move"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def UBFM(cpu_context: ProcessorContext, instruction: Instruction):
    """Unsigned bitfield move (32-bit)"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Bitfield insert and extract
@opcode
def BFC(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitfield insert clear"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def BFI(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitfield insert"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def BFXIL(cpu_context: ProcessorContext, instruction: Instruction):
    """Bitfield extract and insert low"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def SBFIZ(cpu_context: ProcessorContext, instruction: Instruction):
    """Signed bitfield insert in zero"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def SBFX(cpu_context: ProcessorContext, instruction: Instruction):
    """Signed bitfield extract"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def UBFIZ(cpu_context: ProcessorContext, instruction: Instruction):
    """Unsigned bitfield insert in zero"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
@opcode
def UBFX(cpu_context: ProcessorContext, instruction: Instruction):
    """Unsigned bitfield extract"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Extract register
@opcode
def EXTR(cpu_context: ProcessorContext, instruction: Instruction):
    """Extract register from pair"""
    logger.debug("%s instruction not currently implemented.", instruction.mnem)
#endregion
#region Shift (immediate)
@opcode
def ASR(cpu_context: ProcessorContext, instruction: Instruction):
"""Arithmetic shift right"""
operands | |
<reponame>pinyoothotaboot/engineering_tool
import math
class Area:
"""
Function : circularsector
Description : This function to calculate area of circular sector.
Formula : angle/2 x radius^2
Input :
- Radius number type integer or float
- Angle number type integer or float
Return : Area of circle sector in type interger or float
        Example : circularsector(1, 6.2832)
                  >> 3.1416
"""
def circularsector(radius,angle):
if type(radius) not in [int,float]:
raise TypeError("The radius must be a non-negative real number.")
if type(angle) not in [int,float]:
raise TypeError("The degree must be a non-negative real number.")
if radius < 0:
raise ValueError("The radius cannot be negative.")
if angle < 0:
raise ValueError("The degree cannot be negative.")
return (angle/2)*(radius**2)
"""
Function : circle
Description : This function to calculate area of circle.
Formula : PI * Radius^2
Input : Radius number type integer or float
Return : Area of circle in type interger or float
Example : circle(1)
>> 3.1416
"""
def circle(radius):
if type(radius) not in [int,float]:
raise TypeError("The radius must be a non-negative real number.")
if radius < 0:
raise ValueError("The radius cannot be negative.")
return math.pi * (radius**2)
"""
Function : rectangle
Description : This function to calculate area of rectangle.
Formula : Base x Height
Input :
- Base number type integer or float
- Height number type integer or float
Return : Area of rectangle in type interger or float
Example : rectangle(2,2)
>> 4
"""
def rectangle(base,height):
if type(base) not in [int,float]:
raise TypeError("The base must be a non-negative real number.")
if type(height) not in [int,float]:
raise TypeError("The base must be a non-negative real number.")
if base < 0:
raise ValueError("The base cannot be negative.")
if height < 0:
raise ValueError("The height cannot be negative.")
return base * height
"""
Function : square
Description : This function to calculate area of square.
Formula : val^2
Input : Val number type integer or float
Return : Area of square in type interger or float
Example : square(2)
>> 4
"""
def square(val):
if type(val) not in [int,float]:
raise TypeError("The value must be a non-negative real number.")
if val<0:
raise ValueError("The val cannot be negative.")
return val*val
"""
Function : triangle
Description : This function to calculate area of triangle.
Formula : 0.5 x Base x Height
Input :
- Base number type integer or float
- Height number type integer or float
Return : Area of triangle in type interger or float
        Example : triangle(2,2)
>> 2
"""
def triangle(base,height):
if type(base) not in [int,float]:
raise TypeError("The base must be a non-negative real number.")
if type(height) not in [int,float]:
raise TypeError("The height must be a non-negative real number.")
if base < 0:
raise ValueError("The base cannot be negative.")
if height < 0:
raise ValueError("The height cannot be negative.")
return 0.5 * base * height
"""
Function : equilateral
Description : This function to calculate area of equilateral triangle.
Formula : sqrt(3)/4 x s^2
Input :
- S number type integer or float
Return : Area of equilateral triangle in type interger or float
Example : equilateral(0)
>> 0
"""
def equilateral(s):
if type(s) not in [int,float]:
raise TypeError("The s side must be a non-negative real number.")
if s < 0:
raise ValueError("The s side cannot be negative.")
return (math.sqrt(3)/4) * (s**2)
"""
Function : isosceles
Description : This function to calculate area of isosceles triangle.
Formula : b/4 x sqrt(4a^2 - b^2)
Input :
- A number type integer or float
- B number type integer or float
Return : Area of isosceles triangle in type interger or float
Example : isosceles(0,0)
>> 0
"""
def isosceles(a,b):
if type(a) not in [int,float]:
raise TypeError("The a side must be a non-negative real number.")
if type(b) not in [int,float]:
raise TypeError("The b side must be a non-negative real number.")
if a < 0:
raise ValueError("The a side cannot be negative.")
if b < 0:
raise ValueError("The b side cannot be negative.")
return (1/4)*b*math.sqrt(4*(a**2)-(b**2))
"""
Function : hexagon
Description : This function to calculate area of regular hexagon.
Formula : 3/2 x sqrt(3) x s^2
Input :
- S number type integer or float
Return : Area of regular hexagon in type interger or float
Example : hexagon(0)
>> 0
"""
def hexagon(s):
if type(s) not in [int,float]:
raise TypeError("The s side must be a non-negative real number.")
if s < 0:
raise ValueError("The s side cannot be negative.")
return (3/2)*math.sqrt(3)*(s**2)
"""
Function : octagon
Description : This function to calculate area of regular octagon.
Formula : 2 x (1+sqrt(2)) x s^2
Input :
- S number type integer or float
Return : Area of regular octagon in type interger or float
Example : octagon(0)
>> 0
"""
def octagon(s):
if type(s) not in [int,float]:
raise TypeError("The s side must be a non-negative real number.")
if s < 0:
raise ValueError("The s side cannot be negative.")
return 2 * (1+math.sqrt(2))*(s**2)
"""
Function : parallelogram
Description : This function to calculate area of parallelogram.
Formula : Base x Height
Input :
- Base number type integer or float
- Height number type integer or float
Return : Area of parallelogram in type interger or float
Example : parallelogram(2,2)
>> 4
"""
def parallelogram(base,height):
if type(base) not in [int,float]:
raise TypeError("The base must be a non-negative real number.")
if type(height) not in [int,float]:
raise TypeError("The base must be a non-negative real number.")
if base < 0:
raise ValueError("The base cannot be negative.")
if height < 0:
raise ValueError("The height cannot be negative.")
return base * height
"""
Function : trapezoid
Description : This function to calculate area of trapezoid.
Formula : 0.5 x (Base1+Base2) x Height
Input :
- Base number type integer or float
- Height number type integer or float
Return : Area of trapezoid in type interger or float
Example : trapezoid(2,2,2)
>> 4
"""
def trapezoid(base1,base2,height):
if type(base1) not in [int,float]:
raise TypeError("The base 1 must be a non-negative real number.")
if type(base2) not in [int,float]:
raise TypeError("The base 2 must be a non-negative real number.")
if type(height) not in [int,float]:
raise TypeError("The height must be a non-negative real number.")
if base1 < 0:
raise ValueError("The base 1 cannot be negative.")
if base2 < 0:
raise ValueError("The base 2 cannot be negative.")
if height < 0:
raise ValueError("The height cannot be negative.")
return 0.5 * (base1+base2) * height
"""
Function : cylinder
Description : This function to calculate area of surface cylinder.
Formula : 2 x pi x radius x(radius+height)
Input :
- Radius number type integer or float
- Height number type integer or float
Return : Area of surface cylinder in type interger or float
Example : cylinder(0,0)
>> 0
"""
def cylinder(radius,height):
if type(radius) not in [int,float]:
raise TypeError("The radius must be a non-negative real number.")
if type(height) not in [int,float]:
raise TypeError("The height must be a non-negative real number.")
if radius < 0:
raise ValueError("The radius cannot be negative.")
if height < 0:
raise ValueError("The height cannot be negative.")
return 2 * math.pi * radius * (radius+height)
"""
Function : sphere
Description : This function to calculate area of surface sphere.
Formula : 4 x pi x radius^2
Input :
- Radius number type integer or float
Return : Area of surface sphere in type interger or float
Example : sphere(0)
>> 0
"""
def sphere(radius):
if type(radius) not in [int,float]:
raise TypeError("The radius must be a non-negative real number.")
if radius < 0:
raise ValueError("The radius cannot be negative.")
return 4 * math.pi * (radius**2)
"""
Function : pyramid
Description : This function to calculate area of surface pyramid.
Formula : B + PL/2
Input | |
== 11 and not ob.complet)
ob = Observation(json.dumps(dict((obs_1, loc1, dat3))))
self.assertTrue(ob.score == 12 and not ob.complet)
ob = Observation(json.dumps(dict((obs_1, loc3))))
self.assertTrue(ob.score == 20 and not ob.complet)
ob = Observation(json.dumps(dict((obs_1, loc3, dat1))))
self.assertTrue(ob.score == 21 and not ob.complet)
ob = Observation(json.dumps(dict((obs_1, loc3, dat3))))
self.assertTrue(ob.score == 22 and not ob.complet)
ob = Observation(json.dumps(dict((obs_1, _res(1)))))
self.assertTrue(ob.score == 0 and not ob.complet and ob.axes == [])
ob = Observation(json.dumps(dict((obs_1, _res(1), dat1))))
self.assertTrue(ob.score == 1 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(3), dat3))))
self.assertTrue(ob.score == 2 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(1), loc1))))
self.assertTrue(ob.score == 10 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(1), loc1, dat1))))
self.assertTrue(ob.score == 11 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc1, dat3))))
self.assertTrue(ob.score == 12 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc3))))
self.assertTrue(ob.score == 20 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc3, dat1))))
self.assertTrue(ob.score == 21 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc3, dat3))), idxref=[0,0])
self.assertTrue(ob.score == 22 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(6), loc3, dat2))))
self.assertTrue(ob.score == 23 and ob.complet and ob.axes == [0,1])
ob = Observation(json.dumps(dict((obs_1, _res(9)))))
self.assertTrue(ob.score == 0 and not ob.complet and ob.axes == [])
#ob = Observation(json.dumps(dict((obs_1, _res(9), dat1))))
#self.assertTrue(ob.score == 1 and not ob.complet and ob.axes == [])
ob = Observation(json.dumps(dict((obs_1, _res(3), dat3, prop1))))
self.assertTrue(ob.score == 102 and ob.complet and ob.axes == [0])
#ob = Observation(json.dumps(dict((obs_1, _res(6), dat3, prop1))))
#self.assertTrue(ob.score == 102 and not ob.complet and ob.axes == [])
ob = Observation(json.dumps(dict((obs_1, _res(1), loc1, prop1))))
self.assertTrue(ob.score == 110 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(1), loc1, dat1, prop1))))
self.assertTrue(ob.score == 111 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc1, dat3, prop1))))
self.assertTrue(ob.score == 112 and ob.complet and ob.axes == [0])
#ob = Observation(json.dumps(dict((obs_1, _res(6), loc1, dat3, prop1))))
#self.assertTrue(ob.score == 112 and not ob.complet and ob.axes == [])
ob = Observation(json.dumps(dict((obs_1, _res(6), loc3, prop2))))
self.assertTrue(ob.score == 223 and ob.complet and ob.axes == [1, 2])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc3, prop3))), idxref=[0,0])
self.assertTrue(ob.score == 220 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(3), loc3, dat1, prop3))), idxref=[0,1,1])
self.assertTrue(ob.score == 221 and ob.complet and ob.axes == [1])
ob = Observation(json.dumps(dict((obs_1, _res(6), loc3, dat1, prop2))))
self.assertTrue(ob.score == 224 and ob.complet and ob.axes == [1, 2])
ob = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18)))))
self.assertTrue(ob.score == 228 and ob.complet and ob.axes == [0, 1, 2])
ob = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop3, _res(3)))), idxref=[0,0,0])
self.assertTrue(ob.score == 222 and ob.complet and ob.axes == [0])
ob = Observation(json.dumps(dict((obs_1, dat3, loc3, prop2, _res(6)))), idxref = [0,0,2])
self.assertTrue(ob.score == 225 and ob.complet and ob.axes == [0, 2])
#ob.option["json_res_index"] = True
#print(ob.to_json(), '\n')
    def test_obs_dim(self):
        """Check score/complet/axes of Observations built with an explicit idxref.

        NOTE(review): idxref appears to link index dimensions together, with
        `axes` reporting the remaining free axes — confirm against Observation.
        """
        ob1 = Observation(dict((obs_1, truc_mach, dat2, loc3, prop3, _res(6))), idxref=[0,1,1])
        self.assertTrue(ob1.score == 227 and ob1.complet and ob1.axes == [0, 1])
        ob1 = Observation(dict((obs_1, truc_mach, dat3, loc2, prop3, _res(6))), idxref=[0,1,0])
        self.assertTrue(ob1.score == 226 and ob1.complet and ob1.axes == [0, 1])
        # All three indexes linked: a single remaining axis.
        ob1 = Observation(dict((obs_1, truc_mach, dat3, loc3, prop3, _res(3))), idxref=[0,0,0])
        self.assertTrue(ob1.score == 222 and ob1.complet and ob1.axes == [0])
    def test_obs_majListName_majListValue(self):
        """majList must update either the values (name=False) or the names ('name')."""
        ob1 = Observation(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18))))
        # Replace the location values wholesale.
        ob1.majList(LocationValue, [pparis, plyon, pmarseille], name=False)
        self.assertEqual(ob1.setLocation[2].vSimple(), pmarseille)
        ob1.majList(DatationValue, [pt1, pt2, pt3], name=False)
        self.assertEqual(ob1.setDatation[2].simple, pt3)
        # Replace only the names, keeping the values.
        ob1.majList(LocationValue, ['paris', 'lyon', 'marseille'], 'name')
        self.assertEqual(ob1.setLocation[2].name, 'marseille')
    def test_obs_majIndex_iLoc(self):
        """iLoc addressing must follow the storage order given at construction."""
        ob1 = Observation(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18))))
        self.assertEqual(ob1.iLoc(0,0,1)[ES.res_classES], '1')
        # Changing `order` permutes which raw result lands at a given index.
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18)))), order=[1,2,0])
        self.assertEqual(ob1.iLoc(0,0,1)[ES.res_classES], '3')
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18)))), order=[2,0,1])
        self.assertEqual(ob1.iLoc(0,0,1)[ES.res_classES], '9')
        # With linked indexes (idxref), `order` covers only the free axes.
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(6)))), idxref=[0,0,2], order=[0,2])
        self.assertEqual(ob1.iLoc(0,0,1)[ES.res_classES], '1')
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(6)))), idxref=[0,0,2], order=[2,0])
        self.assertEqual(ob1.iLoc(0,0,1)[ES.res_classES], '3')
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat2, loc3, prop3, _res(6)))), idxref=[0,1,1], order=[1,0])
        self.assertEqual(ob1.iLoc(0,1,1)[ES.res_classES], '2')
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc2, prop3, _res(6)))), idxref=[0,1,0], order=[1,0])
        self.assertEqual(ob1.iLoc(0,1,0)[ES.res_classES], '3')
        ob1 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, prop3, _res(3)))), idxref=[0,0,0], order=[0])
        self.assertEqual(ob1.iLoc(1,1,1)[ES.res_classES], '1')
    def test_append_obs(self):
        """appendObs should store the appended Observation as a new result value."""
        ob = Observation(dict((obs_1, dat3, dpt3, prop2, _res(6))), idxref=[0,0,2])
        ob1 = copy.copy(ob)
        ob1.appendObs(ob)
        # The appended Observation itself becomes the value of the 7th result...
        self.assertEqual(ob1.setResult[6].value, ob)
        # ...and its bounding box is appended as a new location.
        self.assertEqual(ob1.setLocation[3], ob.bounds[1])
    def test_obs_sort(self):
        """sort() must reorder the result set according to the axis order given."""
        ob = Observation(dict((obs_1, dat3, dpt3, prop2, _res(6))), idxref=[0,0,2], order=[2,0])
        self.assertEqual(str(ob.setResult), str([0, 1, 2, 3, 4, 5]))
        ob.sort(order=[1,0,2])
        self.assertEqual(str(ob.setResult), str([3, 0, 4, 1, 5, 2]))
        ob.sort(order=[0,1,2])
        self.assertEqual(str(ob.setResult), str([3, 0, 5, 2, 4, 1]))
        # Default sort restores the canonical order.
        ob.sort()
        self.assertEqual(str(ob.setResult), str([0, 1, 2, 3, 4, 5]))
        ob.sort(order=[2,0,1])
        self.assertEqual(str(ob.setResult), str([3, 5, 4, 0, 2, 1]))
def test_obs_add(self):
ob = Observation(dict((obs_1, truc_mach, dat3, loc3, prop2, _res(18))))
ob.option["json_loc_name"] = ob.option["json_dat_name"] = ob.option["json_prp_name"] = True
obp = Observation(dict((obs_1, truc_mach, pdat3, ploc3, pprop2, _res(18))))
obp.option["json_loc_name"] = obp.option["json_dat_name"] = obp.option["json_prp_name"] = True
obc = copy.copy(ob)
obc.option["add_equal"] = "value"
obc.option["json_loc_name"] = obc.option["json_dat_name"] = obc.option["json_prp_name"] = True
obc += ob
self.assertEqual(json.loads(ob.to_json())[ES.res_classES], json.loads(obc.to_json())[ES.res_classES])
self.assertEqual(json.loads(ob.to_json())[ES.dat_classES], json.loads(obc.to_json())[ES.dat_classES])
self.assertEqual(json.loads(ob.to_json())[ES.loc_classES], json.loads(obc.to_json())[ES.loc_classES])
self.assertEqual(json.loads(ob.to_json())[ES.prp_classES], json.loads(obc.to_json())[ES.prp_classES])
ob2 = ob + ob
self.assertEqual(json.loads(ob.to_json())[ES.res_classES], json.loads(ob2.to_json())[ES.res_classES])
self.assertEqual(json.loads(ob.to_json())[ES.dat_classES], json.loads(ob2.to_json())[ES.dat_classES])
self.assertEqual(json.loads(ob.to_json())[ES.loc_classES], json.loads(ob2.to_json())[ES.loc_classES])
self.assertEqual(json.loads(ob.to_json())[ES.prp_classES], json.loads(ob2.to_json())[ES.prp_classES])
obc = copy.copy(ob)
obc += obp
self.assertEqual(json.loads(ob.to_json())[ES.res_classES] + json.loads(obp.to_json())[ES.res_classES],
json.loads(obc.to_json())[ES.res_classES])
self.assertEqual(json.loads(ob.to_json())[ES.dat_classES] + json.loads(obp.to_json())[ES.dat_classES],
json.loads(obc.to_json())[ES.dat_classES])
self.assertEqual(json.loads(ob.to_json())[ES.loc_classES] + json.loads(obp.to_json())[ES.loc_classES],
json.loads(obc.to_json())[ES.loc_classES])
self.assertEqual(json.loads(ob.to_json())[ES.prp_classES] + json.loads(obp.to_json())[ES.prp_classES],
json.loads(obc.to_json())[ES.prp_classES])
ob2 = ob + obp
self.assertEqual(json.loads(ob.to_json())[ES.res_classES] + json.loads(obp.to_json())[ES.res_classES],
json.loads(ob2.to_json())[ES.res_classES])
self.assertEqual(json.loads(ob.to_json())[ES.dat_classES] + json.loads(obp.to_json())[ES.dat_classES],
json.loads(ob2.to_json())[ES.dat_classES])
self.assertEqual(json.loads(ob.to_json())[ES.loc_classES] + json.loads(obp.to_json())[ES.loc_classES],
json.loads(ob2.to_json())[ES.loc_classES])
self.assertEqual(json.loads(ob.to_json())[ES.prp_classES] + json.loads(obp.to_json())[ES.prp_classES],
json.loads(ob2.to_json())[ES.prp_classES])
obp2 = Observation(json.dumps(dict((obs_1, truc_mach, dat3, loc3, pprop2, _res(18)))))
ob2 = ob + obp2
self.assertEqual(json.loads(ob.to_json())[ES.res_classES] + json.loads(obp.to_json())[ES.res_classES],
json.loads(ob2.to_json())[ES.res_classES])
self.assertEqual(json.loads(ob.to_json())[ES.dat_classES], json.loads(ob2.to_json())[ES.dat_classES])
self.assertEqual(json.loads(ob.to_json())[ES.loc_classES], json.loads(ob2.to_json())[ES.loc_classES])
self.assertEqual(json.loads(ob.to_json())[ES.prp_classES] + json.loads(obp2.to_json())[ES.prp_classES],
json.loads(ob2.to_json())[ES.prp_classES])
    def test_obs_full(self):
        """full() must pad the missing index combinations with the fill value."""
        ob=Observation('{"type": "observation", "datation": [{"date1": "2021-02-04T12:05:00"}, \
            "2021-07-04T12:05:00", "2021-05-04T12:05:00"], \
            "location": [{"paris": [2.35, 48.87]}, [4.83, 45.76], [5.38, 43.3]], \
            "property": [{"prp": "PM25", "unit": "kg/m3"}, {"prp": "PM10", "unit": "kg/m3"}], \
            "result": [[0, [0, 0, 0]], [1, [0, 0, 1]], [2, [1, 2, 0]], [3, [1, 1, 1]], \
            [4, [2, 1, 0]], [5, [2, 2, 1]]]}')
        # Existing results keep their position after filling.
        ob1=ob.full(fillvalue=ResultValue(-1))
        self.assertEqual(ob.iLoc(1,2,0), ob1.iLoc(1,2,0))
        # 3 datations x 3 locations x 2 properties = 18 results once complete.
        self.assertEqual(len(ob1), 18)
        # In-place filling must yield the same JSON as the returned copy.
        ob.full(fillvalue=ResultValue(-1), inplace=True)
        self.assertEqual(ob.json, ob1.json)
    def test_obs_extend(self):
        """Union (|) and extend() must rebuild a complete indexed Observation."""
        obp = Observation(dict((_res(6), loc3, prop2)))
        obc = Observation(dict((_res(6), dat3, prop2)))
        ob = Observation(dict((_res(6), dat3, loc3, prop2)), idxref=[0,0,2])
        # Merging a location-only and a datation-only Observation rebuilds ob.
        obcc = obp | obc
        self.assertEqual(obcc.ilist, ob.ilist)
        # extend() attaches new index columns to a bare result set.
        ob = Observation(dict((_res(6), obs_1)))
        ob.extend('datation', ['matin'], [0,0,0,0,0,0])
        ob.extend('location', ['paris', 'lyon', 'marseille'], [0,1,2,0,1,2])
        ob.extend('property', ['pm10', 'pm25'], [0,1,0,1,0,1])
        self.assertEqual(ob.axes, [1,2])
        self.assertTrue(ob.complet)
    def test_sensor(self):
        """Typical sensor workflow: append measurements one by one."""
        obs = Observation()
        prop1 = PropertyValue(prop_pm25)
        for i in range(6):  # simulate a measurement loop
            obs.append(DatationValue(datetime(2021, 6, 4+i, 12, 5).isoformat()),
                       LocationValue([14+i, 40]), prop1, ResultValue(45+i))
        #obs.majType()
        # The exported JSON must carry the computed observation type code.
        obs.option["json_info_type"] = True
        self.assertEqual(json.loads(obs.to_json())[ES.information]["typeobs"], ES.obsCat[122])
class TestExports(unittest.TestCase):
'''Unit tests for `ES.ESObservation.Observation` exports '''
    @unittest.skipIf(mongo, "test envoi mongo")
    def test__envoi_mongo(self):
        """Posting an Observation to MongoDB should answer HTTP 200.

        NOTE(review): skipIf(mongo, ...) skips when `mongo` is truthy — confirm
        the polarity of the flag defined earlier in this file.
        """
        ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(6))), idxref=[0,0,2])
        self.assertEqual(_envoi_mongo(ob), 200)
    def test_geo_interface(self):
        """__geo_interface__ must expose locations as GeoJSON-like mappings."""
        ob = Observation(dict([obs_1, loc3]))
        _resloc = (tuple(paris), tuple(lyon), tuple(marseille))
        _resgeo = dict([(ES.type,"MultiPoint"), ("coordinates",_resloc)])
        self.assertEqual(ob.__geo_interface__, _resgeo)
        self.assertEqual(ob.__geo_interface__["coordinates"], _resloc)
        # Accessing the property again must give a stable result.
        self.assertEqual(ob.__geo_interface__, _resgeo)
        # A polygonal location is exported as a GeoJSON Polygon.
        ob = Observation(dict((obs_1, dpt2, dat1)))
        dpt2pt = {'type': 'Polygon', 'coordinates': (((0.5, 1.5), (0.0, 2.0),
                  (1.0, 2.0), (2.0, 2.0), (1.0, 1.0), (0.0, 1.0), (0.5, 1.5)),)}
        self.assertEqual(ob.__geo_interface__, dpt2pt)
    def test_obs_polygon(self):
        """An Observation over a polygonal location exposes a GeoJSON Polygon."""
        ob = Observation(dict((obs_1, dat3, dpt2, prop2, _res(6))), idxref=[0,1,1])
        self.assertEqual(ob.__geo_interface__, {'type': 'Polygon',
                         'coordinates': (((0.5, 1.5), (0.0, 2.0), (1.0, 2.0),
                         (2.0, 2.0), (1.0, 1.0), (0.0, 1.0), (0.5, 1.5)),)})
    def test_to_numpy(self):
        """to_numpy must honour the conversion function, string and ind options."""
        ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(6))), idxref=[0,0,2], order=[2,0])
        self.assertEqual(ob.to_numpy(func=ResultValue.vSimple, string=True)[1,1], '2.0')
        self.assertEqual(ob.to_numpy(func=ResultValue.vSimple, string=False)[1,1], 2.0)
        # genName supplies a placeholder when values carry no name.
        self.assertEqual(ob.to_numpy(func=ESValue.vName, genName='-')[1,1], '-')
        # ind='all' exports the full (non-linked) index grid.
        self.assertEqual(ob.to_numpy(func=ResultValue.vSimple, string=True, ind='all')[1,1,0], '5.0')
    def test_xarray(self):
        """to_xarray must place each result at its index coordinates."""
        ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(6))), idxref=[0,0,2])
        self.assertTrue(ob.to_xarray()[2,1].item() == ResultValue(2))
        # ind='all' keeps the linked axis as a separate dimension.
        self.assertTrue(ob.to_xarray(ind='all')[2,2,1].item() == ResultValue(2))
        ob = Observation(dict((obs_1, dat3, loc3, prop1, _res(3))), idxref=[0,0,2])
        self.assertTrue(ob.to_xarray()[1].item() == ResultValue(2))
        ob = Observation(dict((obs_1, dat3, loc3, prop3, _res(3))), idxref=[0,0,0])
        self.assertTrue(ob.to_xarray()[1].item() == ResultValue(2))
        ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(18))))
        self.assertTrue(ob.to_xarray()[2,1,0].item() == ResultValue(9))
        ob = Observation(dict((obs_1, dat3, loc2, prop1, _res(6))))
        self.assertTrue(ob.to_xarray()[2,0].item() == ResultValue(2))
'''@unittest.skipIf(plot, "test plot")
def test_plot(self):
ob = Observation(dict((obs_1, dat3ord, loc3, prop1, _res(9))), order='ldp')
self.assertTrue(ob.plot(line=True, size =5) == None)
self.assertTrue(ob.plot(line=False, size =5) == None)
ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(18))), order='dlp')
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
self.assertTrue(ob.voxel(sort=True) == None)
ob.full()
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
ob = Observation(dict((obs_1, dat3, loc3, prop2, _res(6))), order='xp')
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
self.assertTrue(ob.voxel(sort=True) == None)
ob.full()
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
ob = Observation(dict((obs_1, dat3, loc2, prop3, _res(6))), order='xl')
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
self.assertTrue(ob.voxel(sort=True) == None)
ob.full()
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) == None)
ob = Observation(dict((obs_1, dat2, loc3, prop3, _res(6))), order='xd')
self.assertTrue(ob.plot(line=True) == None)
self.assertTrue(ob.plot(line=False) | |
<reponame>mansi-team/mansi_corpus
import re
import copy
import os
class DumbMorphParser:
"""
Contains methods that add context-independent word-level
morhological information from a parsed word list to a
collection of JSON sentences. No actual parsing takes
place here.
"""
rxWordsRNC = re.compile('<w>(<ana.*?/(?:ana)?>)([^<>]+)</w>', flags=re.DOTALL)
rxAnalysesRNC = re.compile('<ana *([^<>]+)(?:></ana>|/>)\\s*')
rxAnaFieldRNC = re.compile('([^ <>"=]+) *= *"([^<>"]+)')
rxSplitGramTags = re.compile('[,, /=]')
rxHyphenParts = re.compile('[^\\-]+|-+')
rxGlossParts = re.compile('[^ \\-=<>]+')
rxGlossIndexPart = re.compile('^(.*)\\{(.*?)\\}')
rxBracketGloss = re.compile('[.-]?\\[.*?\\]')
def __init__(self, settings, categories, errorLog=''):
self.settings = copy.deepcopy(settings)
self.categories = copy.deepcopy(categories)
self.rxAllGlosses = self.prepare_gloss_regex()
self.analyses = {}
self.errorLog = errorLog
self.grammRules = []
if 'multivalued_ana_features' in self.settings:
self.settings['multivalued_ana_features'] = set(self.settings['multivalued_ana_features'])
else:
self.settings['multivalued_ana_features'] = set()
if 'gramtags_exclude' in self.settings:
self.settings['gramtags_exclude'] = set(self.settings['gramtags_exclude'])
else:
self.settings['gramtags_exclude'] = set()
if ('parsed_wordlist_filename' in self.settings
and len(self.settings['parsed_wordlist_filename']) > 0):
if type(self.settings['parsed_wordlist_filename']) == str:
self.load_analyses(os.path.join(self.settings['corpus_dir'],
self.settings['parsed_wordlist_filename']))
else:
for language in self.settings['parsed_wordlist_filename']:
self.load_analyses(os.path.join(self.settings['corpus_dir'],
self.settings['parsed_wordlist_filename'][language]),
language)
self.load_rules()
    def load_rules(self):
        """
        Load rules for converting the glosses into bags of grammatical
        tags.
        """
        # The rules file is expected at conf/gramRules.txt under the corpus dir.
        self.load_gramm_rules(os.path.join(self.settings['corpus_dir'], 'conf/gramRules.txt'))
@staticmethod
def prepare_rule(rule):
"""
Make a compiled regex out of a rule represented as a string.
"""
def replReg(s):
if "'" in s:
return ''
return ' re.search(\'' + s + \
'\', ana[\'parts\']) is not None or ' + \
're.search(\'' + s + \
'\', ana[\'gloss\']) is not None '
ruleParts = rule.split('"')
rule = ''
for i in range(len(ruleParts)):
if i % 2 == 0:
rule += re.sub('([^\\[\\]~|& \t\']+)', ' \'\\1\' in tagsAndGlosses ',
ruleParts[i]).replace('|', ' or ').replace('&', ' and ') \
.replace('~', ' not ').replace('[', '(').replace(']', ')')
else:
rule += replReg(ruleParts[i])
return rule
def load_gramm_rules(self, fname):
"""
Load main set of rules for converting the glosses into bags
of grammatical tags.
"""
if len(fname) <= 0 or not os.path.isfile(fname):
return
rules = []
f = open(fname, 'r', encoding='utf-8-sig')
for line in f:
line = re.sub('#.*', '', line).strip()
if len(line) > 0:
rule = [i.strip() for i in line.split('->')]
if len(rule) != 2:
continue
rule[1] = set(rule[1].split(','))
rule[0] = self.prepare_rule(rule[0])
rules.append(rule)
f.close()
self.grammRules = rules
def log_message(self, message):
"""
If the filename of the error log is not empty, append
the message to the file.
"""
if self.errorLog is None or len(self.errorLog) <= 0:
return
try:
fLog = open(self.errorLog, 'a', encoding='utf-8')
fLog.write(message + '\n')
fLog.close()
except:
return
def load_analyses(self, fname, lang=''):
"""
Load parsed word list from a file.
"""
if lang == '':
if 'languages' in self.settings and len(self.settings['languages']) > 0:
lang = self.settings['languages'][0]
else:
lang = self.settings['corpus_name']
self.analyses[lang] = {}
try:
f = open(fname, 'r', encoding='utf-8-sig')
text = f.read()
f.close()
if self.settings['parsed_wordlist_format'] == 'xml_rnc':
self.load_analyses_xml_rnc(text, lang=lang)
except FileNotFoundError:
fLog = open(self.errorLog, 'a', encoding='utf-8')
fLog.write('File not found: ' + fname + '\n')
fLog.close()
def transform_gramm_str(self, grStr, lang=''):
"""
Transform a string with gramtags into a JSON object.
"""
grJSON = {}
grTags = self.rxSplitGramTags.split(grStr)
for tag in grTags:
if len(tag.strip()) <= 0:
continue
if tag in self.settings['gramtags_exclude']:
continue
if tag not in self.categories[lang]:
print('No category for a gramtag:', tag, ', language:', lang)
continue
cat = 'gr.' + self.categories[lang][tag]
if cat not in grJSON:
grJSON[cat] = tag
else:
if type(grJSON[cat]) != list:
grJSON[cat] = [grJSON[cat]]
if tag not in grJSON[cat]:
grJSON[cat].append(tag)
return grJSON
    def prepare_gloss_regex(self):
        """
        Return a dictionary of regexes, one per language, that find all glosses.
        """
        regexes = {}
        for lang in self.settings['languages']:
            if lang not in self.categories:
                self.categories[lang] = {}
            if 'glosses' in self.settings and lang in self.settings['glosses']:
                # Explicit gloss list from the settings (case-sensitive match).
                # NOTE(review): alternatives are sorted by ascending length, so
                # a shorter gloss can match before a longer one that starts
                # with it — confirm whether reverse=True was intended.
                sRegex = '|'.join(re.escape(g) for g in sorted(self.settings['glosses'][lang], key=len))
                sRegex = '\\b(' + sRegex + ')\\b'
                regexes[lang] = re.compile(sRegex)
            else:
                # Fall back to the category tag list (case-insensitive match).
                sRegex = '|'.join(re.escape(g) for g in sorted(self.categories[lang], key=len))
                sRegex = '\\b(' + sRegex + ')\\b'
                regexes[lang] = re.compile(sRegex, flags=re.I)
        return regexes
def gloss2gr(self, ana, lang, useGlossList=False):
"""
For an analysis that has glosses, but no tags for inflectional
categories, add these categories.
If useGlossList, use the list of glosses to distinguish between
glosses and stem translations. In the opposite case, consider
everyjting other than "STEM" a gloss.
"""
# TODO: Add rules for translating the glosses into tags.
if 'gloss_index' not in ana:
return
if useGlossList:
glosses = self.rxAllGlosses[lang].findall(ana['gloss_index'])
else:
glosses = [self.rxGlossIndexPart.search(g).group(1)
for g in self.rxGlossParts.findall(ana['gloss_index'])]
if 'glosses_covert' in ana:
glosses += ana['glosses_covert']
del ana['glosses_covert']
addedGrammTags = set()
tagsAndGlosses = set()
for field in ana:
if field.startswith('gr.'):
if type(ana[field]) == str:
tagsAndGlosses.add(ana[field])
elif type(ana[field]) == list:
tagsAndGlosses |= set(ana[field])
tagsAndGlosses |= set(gl.strip('-=:.<>') for gl in glosses)
if len(self.grammRules) > 0:
for rule in self.grammRules:
if eval(rule[0]):
addedGrammTags |= rule[1]
else:
for gl in glosses:
if gl.upper() == gl:
gl = gl.lower()
addedGrammTags.add(gl)
# print(list(addedGrammTags), list(tagsAndGlosses))
for tag in addedGrammTags:
if tag in self.categories[lang]:
anaCatName = 'gr.' + self.categories[lang][tag]
if anaCatName not in ana:
ana[anaCatName] = tag
else:
if type(ana[anaCatName]) == str:
ana[anaCatName] = [ana[anaCatName], tag]
elif tag not in ana[field]:
ana[anaCatName].append(tag)
def find_stems(self, glossIndex, lang):
"""
Return all glosses that are not in the categories list, and
therefore are the glosses for the stem.
"""
stems = []
newIndexGloss = ''
for glossPart in glossIndex.split('-'):
if len(glossPart) <= 0:
continue
m = self.rxGlossIndexPart.search(glossPart)
if m is None:
newIndexGloss += glossPart + '-'
continue
gloss, part = m.group(1), m.group(2)
if self.rxAllGlosses[lang].match(gloss) is None:
stems.append((gloss, part))
newIndexGloss += 'STEM{' + part + '}-'
else:
newIndexGloss += glossPart + '-'
return stems, newIndexGloss
def process_gloss_in_ana(self, ana, gloss_lang=''):
"""
If there are fields 'gloss' and 'parts' in the JSON
analysis, add field 'gloss_index' that contains the
glossed word in such a form that it could be queried
with the gloss query language.
If gloss_lang is not empty, look in fields "gloss_%gloss_lang%"
etc. instead of just "gloss". This may be needed if
there are glosses in more than one metalanguage.
Modify the source analysis, do not return anything.
"""
if len(gloss_lang) > 0:
gloss_lang = '_' + gloss_lang
if 'gloss' + gloss_lang not in ana or 'parts' not in ana:
return
wordParts = self.rxGlossParts.findall(ana['parts'].replace('{', '(').replace('{', ')').replace(' ', '.'))
glosses = self.rxGlossParts.findall(ana['gloss' + gloss_lang])
glossesOvert = [g for g in glosses if self.rxBracketGloss.search(g) is None]
glossesCovert = [g.strip('[]') for g in glosses if self.rxBracketGloss.search(g) is not None]
if len(wordParts) <= 0 or len(glosses) == 0 or len(wordParts) != len(glossesOvert):
self.log_message('Wrong gloss or partitioning: ' + ana['parts'] + ' != ' + ana['gloss' + gloss_lang])
return
glossIndex = '-'.join(p[1] + '{' + p[0] + '}'
for p in zip(wordParts, glossesOvert)) + '-'
ana['gloss_index' + gloss_lang] = glossIndex
if len(glossesCovert) > 0:
ana['glosses_covert' + gloss_lang] = glossesCovert
def transform_ana_rnc(self, ana, lang=''):
"""
Transform analyses for a single word, written in the XML
format used in Russian National Corpus, into a JSON object.
"""
setAna = set(self.rxAnalysesRNC.findall(ana.replace('\t', '')))
analyses = []
for ana in setAna:
fields = self.rxAnaFieldRNC.findall(ana)
if len(fields) <= 0:
continue
anaJSON = {}
for k, v in fields:
if k == 'gr':
anaJSON.update(self.transform_gramm_str(v, lang=lang))
elif k in self.settings['multivalued_ana_features']:
anaJSON[k] = [tag.strip() for tag in v.split()]
else:
anaJSON[k] = v
self.process_gloss_in_ana(anaJSON)
analyses.append(anaJSON)
return analyses
    def load_analyses_xml_rnc(self, text, lang=''):
        """
        Load analyses from a string in the XML format used
        in Russian National Corpus.
        """
        if lang == '':
            if 'languages' in self.settings and len(self.settings['languages']) > 0:
                lang = self.settings['languages'][0]
            else:
                lang = self.settings['corpus_name']
        # there can be several languages if the corpus is parallel
        analyses = self.rxWordsRNC.findall(text)
        if lang not in self.analyses:
            self.analyses[lang] = {}
        iAna = 1
        print('Loading analyses...')
        for ana in analyses:
            # Progress indicator for large wordlists.
            if iAna % 20000 == 0:
                print('Loading analysis #' + str(iAna))
            # Strip surrounding punctuation and quote characters from the form.
            word = ana[1].strip('$&^#%*·;·‒–—―•…‘’‚“‛”„‟"\'')
            if len(word) <= 0:
                continue
            if iAna <= 50000:  # We assume the analyses are ordered by word frequency
                ana = self.transform_ana_rnc(ana[0], lang=lang)
            else:
                ana = ana[0]  # Avoid huge memory consumption at the expense of time
            # First occurrence wins: later duplicates of a form are ignored.
            if word not in self.analyses[lang]:
                self.analyses[lang][word] = ana
            iAna += 1
        print('Analyses for', len(self.analyses[lang]), 'different words loaded.')
def normalize(self, word):
"""
Normalize a word before searching for it in the list of analyses.
"""
word = word.strip().lower()
if 'char_replacements' in self.settings:
wordClean = ''
for c in word:
if c in self.settings['char_replacements']:
wordClean += | |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import interpolate
# from utils.utils import bilinear_sampler, coords_grid
class FlowHead(nn.Module):
    """Two-layer conv head mapping a hidden state to a 2-channel flow field."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        # Attribute names are kept so checkpoint state_dicts stay compatible.
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
class ConvGRU(nn.Module):
    """Convolutional GRU cell operating on (N, C, H, W) feature maps."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(ConvGRU, self).__init__()
        self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
        self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        combined = torch.cat([h, x], dim=1)
        update = torch.sigmoid(self.convz(combined))
        reset = torch.sigmoid(self.convr(combined))
        candidate = torch.tanh(self.convq(torch.cat([reset * h, x], dim=1)))
        # Standard GRU blend of previous state and candidate.
        return (1 - update) * h + update * candidate
class SepConvGRU(nn.Module):
    """GRU cell with separable 1x5 / 5x1 convolutions (horizontal then vertical)."""

    def __init__(self, hidden_dim=128, input_dim=192+128):
        super(SepConvGRU, self).__init__()
        self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
        self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
        self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))

    def forward(self, h, x):
        # Two identical GRU passes: first with the horizontal (1x5) convs,
        # then with the vertical (5x1) convs.
        for convz, convr, convq in ((self.convz1, self.convr1, self.convq1),
                                    (self.convz2, self.convr2, self.convq2)):
            hx = torch.cat([h, x], dim=1)
            z = torch.sigmoid(convz(hx))
            r = torch.sigmoid(convr(hx))
            q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
            h = (1 - z) * h + z * q
        return h
class SmallMotionEncoder(nn.Module):
    """Encodes the correlation volume and current flow into motion features."""

    def __init__(self, corr_levels, corr_radius):
        super(SmallMotionEncoder, self).__init__()
        # One correlation plane per pyramid level and window displacement.
        cor_planes = corr_levels * (2 * corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf1(flow))
        flow_feat = F.relu(self.convf2(flow_feat))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # The raw flow is re-appended so later stages keep direct access to it.
        return torch.cat([fused, flow], dim=1)
class BasicMotionEncoder(nn.Module):
    """Larger motion encoder: correlation + flow -> 128-channel features."""

    def __init__(self, corr_levels, corr_radius):
        super(BasicMotionEncoder, self).__init__()
        cor_planes = corr_levels * (2 * corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
        self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
        self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
        # 126 channels so that appending the 2-channel flow yields 128.
        self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc2(F.relu(self.convc1(corr))))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        fused = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # The raw flow is re-appended so later stages keep direct access to it.
        return torch.cat([fused, flow], dim=1)
class SmallUpdateBlock(nn.Module):
    """GRU-based update step of the small RAFT model (no upsampling mask)."""

    def __init__(self, corr_levels, corr_radius, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(corr_levels, corr_radius)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        features = self.encoder(flow, corr)
        net = self.gru(net, torch.cat([inp, features], dim=1))
        # The small model has no learned upsampling mask, hence the None.
        return net, None, self.flow_head(net)
class BasicUpdateBlock(nn.Module):
    """GRU update step of the full RAFT model; also predicts an upsampling mask.

    NOTE(review): input_dim is accepted but unused, matching the reference code.
    """

    def __init__(self, corr_levels, corr_radius, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder(corr_levels, corr_radius)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64*9, 1, padding=0))

    def forward(self, net, inp, corr, flow, upsample=True):
        features = self.encoder(flow, corr)
        net = self.gru(net, torch.cat([inp, features], dim=1))
        delta_flow = self.flow_head(net)
        # Scale the mask to balance gradients (as in the RAFT reference code).
        mask = .25 * self.mask(net)
        return net, mask, delta_flow
class ResidualBlock(nn.Module):
    """Two 3x3 convs with a residual connection and configurable normalization.

    Args:
        in_planes: input channel count.
        planes: output channel count.
        norm_fn: one of 'group', 'batch', 'instance', 'none'.
        stride: stride of the first conv; when != 1 the identity path is
            downsampled with a 1x1 conv so the shapes match.

    Raises:
        ValueError: if norm_fn is not a supported name (the original code
            only failed later with an obscure AttributeError).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1:
                self.norm3 = nn.Sequential()
        else:
            # Fail fast on an unsupported normalization name.
            raise ValueError("unknown norm_fn: %r" % (norm_fn,))

        if stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))

        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x + y)
class BottleneckBlock(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (inner width planes//4).

    When ``stride != 1`` the skip connection uses a 1x1 strided projection.

    # Parameters
    in_planes: number of input channels
    planes: number of output channels (inner convs use planes // 4)
    norm_fn: one of 'group', 'batch', 'instance', 'none'
    stride: stride of the middle convolution (and of the skip projection)
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        def make_norm(channels):
            # One fresh normalization layer per call: layers must not share state.
            if norm_fn == 'group':
                return nn.GroupNorm(num_groups=num_groups, num_channels=channels)
            if norm_fn == 'batch':
                return nn.BatchNorm2d(channels)
            if norm_fn == 'instance':
                return nn.InstanceNorm2d(channels)
            if norm_fn == 'none':
                return nn.Sequential()
            # Fail fast: previously an unknown norm_fn silently left the norm
            # attributes unset and crashed later with AttributeError.
            raise ValueError(f"unknown norm_fn: {norm_fn!r}")

        self.norm1 = make_norm(planes//4)
        self.norm2 = make_norm(planes//4)
        self.norm3 = make_norm(planes)
        if stride == 1:
            self.downsample = None
        else:
            # Extra norm for the 1x1 projection on the skip connection.
            self.norm4 = make_norm(planes)
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        """Return relu(skip(x) + bottleneck_branch(x))."""
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))
        if self.downsample is not None:
            x = self.downsample(x)
        return self.relu(x + y)
class BasicEncoder(nn.Module):
    """Residual feature encoder: 3-channel image -> output_dim features at 1/8 resolution."""

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn

        # Stem normalization, selected by name.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        # Stem: 7x7 stride-2 conv.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        # Three residual stages; the last two halve the resolution again.
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)

        # Output projection to the requested feature dimension.
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)

        self.dropout = nn.Dropout2d(p=dropout) if dropout > 0 else None

        # He initialization for convs; affine norms start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        """Two residual blocks; only the first one may downsample."""
        blocks = (
            ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride),
            ResidualBlock(dim, dim, self.norm_fn, stride=1),
        )
        self.in_planes = dim
        return nn.Sequential(*blocks)

    def forward(self, x):
        # A tuple/list of images is concatenated along the batch dimension
        # and split back afterwards.
        batched_pair = isinstance(x, (tuple, list))
        if batched_pair:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)

        x = self.relu1(self.norm1(self.conv1(x)))
        x = self.layer3(self.layer2(self.layer1(x)))
        x = self.conv2(x)

        if self.training and self.dropout is not None:
            x = self.dropout(x)

        if batched_pair:
            # NOTE(review): the split assumes exactly two equally-sized inputs
            # were passed in -- confirm against callers.
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class SmallEncoder(nn.Module):
    """Lightweight bottleneck encoder: 3-channel image -> output_dim features at 1/8 resolution."""

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn

        # Stem normalization, selected by name.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        # Stem: 7x7 stride-2 conv.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        # Three bottleneck stages; the last two halve the resolution again.
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)

        self.dropout = nn.Dropout2d(p=dropout) if dropout > 0 else None

        # Output projection to the requested feature dimension.
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)

        # He initialization for convs; affine norms start as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        """Two bottleneck blocks; only the first one may downsample."""
        blocks = (
            BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride),
            BottleneckBlock(dim, dim, self.norm_fn, stride=1),
        )
        self.in_planes = dim
        return nn.Sequential(*blocks)

    def forward(self, x):
        # A tuple/list of images is concatenated along the batch dimension
        # and split back afterwards.
        batched_pair = isinstance(x, (tuple, list))
        if batched_pair:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)

        x = self.relu1(self.norm1(self.conv1(x)))
        x = self.layer3(self.layer2(self.layer1(x)))
        x = self.conv2(x)

        if self.training and self.dropout is not None:
            x = self.dropout(x)

        if batched_pair:
            # NOTE(review): the split assumes exactly two equally-sized inputs
            # were passed in -- confirm against callers.
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
class CorrBlock:
def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
    """Build a multi-level correlation pyramid from two feature maps.

    fmap1/fmap2 are the feature maps to correlate; num_levels is the number
    of pyramid levels and radius the lookup radius used later in __call__.
    """
    self.num_levels = num_levels
    self.radius = radius
    self.corr_pyramid = []

    # all pairs correlation (CorrBlock.corr is defined outside this excerpt)
    corr = CorrBlock.corr(fmap1, fmap2)

    # The unpack below shows corr is 6-D: one (h2, w2) response map per
    # source location (h1, w1).
    batch, h1, w1, dim, h2, w2 = corr.shape
    # Flatten source locations into the batch axis so avg_pool2d can
    # operate on the (h2, w2) target plane.
    corr = corr.reshape(batch*h1*w1, dim, h2, w2)

    self.corr_pyramid.append(corr)
    for i in range(self.num_levels-1):
        # Each coarser level halves the target resolution.
        corr = F.avg_pool2d(corr, 2, stride=2)
        self.corr_pyramid.append(corr)
def __call__(self, coords):
r = self.radius
coords = coords.permute(0, 2, 3, 1)
batch, h1, w1, _ = coords.shape
out_pyramid = []
for i in range(self.num_levels):
corr = self.corr_pyramid[i]
dx = torch.linspace(-r, r, 2*r+1)
dy = torch.linspace(-r, r, 2*r+1)
delta | |
<filename>docs/autodoc.py
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ast
import importlib
import inspect
import json
import logging
import os
import pkgutil
import re
import sys
import urllib
import urllib.parse  # needed explicitly: `import urllib` alone does not load the submodule
from functools import lru_cache
from glob import glob
from typing import List, Tuple

import skdecide
# Marker token -- presumably replaced by the generated notebooks list further
# down the script (usage is outside this excerpt).
NOTEBOOKS_LIST_PLACEHOLDER = "[[notebooks-list]]"

logger = logging.getLogger(__name__)

# Registry of every "module.qualname" reference recorded by get_ref().
refs = set()
# https://stackoverflow.com/questions/48879353/how-do-you-recursively-get-all-submodules-in-a-python-package
def find_abs_modules(package):
    """Return the absolute dotted names of all leaf modules under *package*.

    Sub-packages are loaded temporarily (so walk_packages can descend into
    them) and removed from sys.modules afterwards.

    NOTE(review): relies on private APIs (pkgutil._get_spec,
    importlib._bootstrap._load) that may change between Python versions.
    """
    path_list = []
    spec_list = []
    for importer, modname, ispkg in pkgutil.walk_packages(package.__path__):
        # Skip this module explicitly -- presumably the compiled C++ hub
        # extension, which should not be imported here.
        if modname == "hub.__skdecide_hub_cpp":
            continue
        import_path = f"{package.__name__}.{modname}"
        if ispkg:
            # Load the sub-package so its contents can be walked; failures
            # are reported and the package skipped.
            spec = pkgutil._get_spec(importer, modname)
            try:
                importlib._bootstrap._load(spec)
                spec_list.append(spec)
            except Exception as e:
                print(f"Could not load package {modname}, so it will be ignored ({e}).")
        else:
            path_list.append(import_path)
    for spec in spec_list:
        # Undo the temporary imports performed above.
        del sys.modules[spec.name]
    return path_list
def py_parse(filepath):
    """Parse a Python source file.

    Returns a (docstring, name, contents) tuple where docstring is the
    stripped module docstring ("" when absent), name the file name without
    extension, and contents the raw file text.
    """
    with open(filepath) as fh:
        source = fh.read()
    tree = ast.parse(source)
    raw_doc = ast.get_docstring(tree)
    docstring = raw_doc.strip() if raw_doc is not None else ""
    name = os.path.splitext(os.path.basename(filepath))[0]
    return docstring, name, source
@lru_cache(maxsize=1000)
def get_ref(object):
    """Return the dotted reference "module.qualname" for *object* and record it.

    Side effect: every computed ref is added to the module-level `refs` set.
    The lru_cache is safe with that side effect: the set is populated on the
    first (uncached) call, so repeated calls leave it consistent.

    NOTE(review): the parameter shadows the builtin `object`; renaming it
    would change the public signature, so it is kept as-is.
    """
    # Prefer the qualified name, then __name__, then typing-style _name.
    name = getattr(object, "__qualname__", None)
    if name is None:
        name = getattr(object, "__name__", None)
    if name is None:
        name = object._name
    reflist = [name]
    if hasattr(object, "__module__"):
        reflist.insert(0, object.__module__)
    ref = ".".join(reflist)
    refs.add(ref)
    return ref
def format_doc(doc):
    """Convert the project's docstring markup into Markdown.

    Transformations: "#obj.ref" -> inline code, "# Parameters" contents ->
    bullet list, section titles -> "####" headings, "!!! tip/warning/danger"
    -> ":::" containers.
    """
    # References like "#obj.func()" become "`obj.func()`" for code highlighting.
    doc = re.sub(r"#(?P<ref>[\w\.,()]*[\w()])", lambda m: f'`{m.group("ref")}`', doc)

    # Each parameter line under "# Parameters" becomes a bold bullet item.
    def list_content(content):
        content = re.sub(
            r"^(?P<param>\w+)",
            lambda m: f'- **{m.group("param")}**',
            content,
            flags=re.MULTILINE,
        )
        return content.strip()

    doc = re.sub(
        r"^(?<=# Parameters\n)(?P<content>(?:\n?\s*\w.*)+)",
        lambda m: list_content(m.group("content")),
        doc,
        flags=re.MULTILINE,
    )

    # Section titles such as "# Parameters" become "#### Parameters".
    doc = re.sub(r"^# (?=Parameters|Returns|Example)", "#### ", doc, flags=re.MULTILINE)

    # Admonitions "!!! tip|warning|danger" become "::: ... :::" containers,
    # with the 4-space body indentation removed.
    def strip_content(content):
        return re.sub(r"^ {4}", "", content, flags=re.MULTILINE).rstrip()

    doc = re.sub(
        r"!!! (?=tip|warning|danger)(?P<head>.*)\n(?P<content>(?:\n? {4,}.*)+)",
        lambda m: f'::: {m.group("head")}\n{strip_content(m.group("content"))}\n:::',
        doc,
    )
    return doc
def add_func_method_infos(func_method, autodoc):
    """Fill *autodoc* in place with the type and signature of *func_method*.

    Sets "type" to "function"/"method" when applicable and records a
    "signature" entry with parameter names, defaults and annotations.
    """
    if inspect.isfunction(func_method):
        autodoc["type"] = "function"
    elif inspect.ismethod(func_method):
        autodoc["type"] = "method"

    signature = inspect.signature(func_method)
    hide_self = func_method.__name__ == "__init__"

    params = []
    for pname, parameter in signature.parameters.items():
        if pname == "self" and hide_self:
            continue  # constructors: "self" is not part of the documented API
        param = {"name": pname}
        if parameter.default != signature.empty:
            param["default"] = str(parameter.default)
            if "lambda" in param["default"]:
                param["default"] = "<lambda function>"  # TODO: improve?
        if parameter.annotation != signature.empty:
            param["annotation"] = parameter.annotation
        params.append(param)

    autodoc["signature"] = {"params": params}
    if signature.return_annotation != signature.empty:
        autodoc["signature"]["return"] = signature.return_annotation
def add_basic_member_infos(member, autodoc):
    """Best-effort: record ref, source, line number and formatted doc of *member*.

    The dict is filled incrementally inside the try block, so on failure the
    keys collected so far (e.g. "ref") may remain -- callers must tolerate
    partially-filled entries.
    """
    try:
        autodoc["ref"] = get_ref(member)
        source, line = inspect.getsourcelines(member)
        autodoc["source"] = "".join(source)  # TODO: keep?
        autodoc["line"] = line
        doc = inspect.getdoc(member)
        if doc is not None:
            autodoc["doc"] = format_doc(doc)
    except Exception:  # can happen e.g. when member is TypeVar (no source available)
        pass
def json_escape(obj):
    """Serialize *obj* to JSON with quote styles swapped.

    Double quotes become single quotes (and pre-existing single quotes are
    backslash-escaped) so the result can sit inside a double-quoted HTML
    attribute.
    """
    serialized = json.dumps(obj)
    return serialized.replace("'", r"\'").replace('"', "'")
def md_escape(md):
    """Backslash-escape the characters _ < > so Markdown/HTML leave them literal."""
    return re.sub(r"[_<>]", lambda match: f"\\{match.group()}", md)
def doc_escape(md):
    """Backslash-escape only '<' so tag-like text is not parsed as HTML."""
    return re.sub(r"[<]", lambda match: f"\\{match.group()}", md)
def write_signature(md, member):
    """Return *md* with a <skdecide-signature> component appended for *member*.

    Members without a "signature" entry are returned unchanged.
    """
    if "signature" not in member:
        return md
    escape_json_sig = json_escape(member["signature"])
    return md + f'<skdecide-signature name= "{member["name"]}" :sig="{escape_json_sig}"></skdecide-signature>\n\n'
def is_implemented(func_code):
    """Heuristically report whether *func_code* contains a real implementation.

    Returns False when the last non-blank line raises NotImplementedError.
    Unlike the previous plain endswith() check, this also recognizes
    "raise NotImplementedError('message')" and a trailing comment on the
    final line.
    """
    stripped = func_code.strip()
    if not stripped:
        return True  # nothing to inspect; preserve old behavior for empty input
    last_line = stripped.rsplit("\n", 1)[-1].strip()
    return re.match(r"raise\s+NotImplementedError\b", last_line) is None
def get_binder_link(
    binder_env_repo_name: str,
    binder_env_branch: str,
    notebooks_repo_url: str,
    notebooks_branch: str,
    notebook_relative_path: str,
) -> str:
    """Build a mybinder.org link that opens a notebook via nbgitpuller.

    The link launches the binder environment image and instructs nbgitpuller
    to pull the notebooks repository and open the given notebook.
    """
    hub = urllib.parse.urlsplit("https://mybinder.org")
    # Path identifying the binder environment image.
    env_path = f"v2/gh/{binder_env_repo_name}/{binder_env_branch}"
    # nbgitpuller query: pull the notebooks repo and open the file.
    repo_basename = os.path.basename(notebooks_repo_url)
    pull_query = urllib.parse.urlencode(
        {
            "repo": notebooks_repo_url,
            "urlpath": f"tree/{repo_basename}/{notebook_relative_path}",
            "branch": notebooks_branch,
        }
    )
    outer_query = urllib.parse.urlencode({"urlpath": f"git-pull?{pull_query}"})
    # Assemble the full link.
    return urllib.parse.urlunsplit(
        urllib.parse.SplitResult(
            scheme=hub.scheme,
            netloc=hub.netloc,
            path=env_path,
            query=outer_query,
            fragment="",
        )
    )
def get_github_link(
    notebooks_repo_url: str,
    notebooks_branch: str,
    notebook_relative_path: str,
) -> str:
    """Return the GitHub blob URL of a notebook at the given branch."""
    parts = (notebooks_repo_url, "blob", notebooks_branch, notebook_relative_path)
    return "/".join(parts)
def get_repo_n_branches_for_binder_n_github_links() -> Tuple[bool, str, str, str, str]:
    """Read the environment configuration for notebook GitHub/binder links.

    Returns a tuple (creating_links, notebooks_repo_url, notebooks_branch,
    binder_env_repo_name, binder_env_branch).  creating_links is False when
    the notebook repo/branch variables are missing, in which case a warning
    is logged and empty strings are returned for them.
    """
    # Binder environment: optional variables with sensible defaults.
    binder_env_repo_name = os.environ.get(
        "AUTODOC_BINDER_ENV_GH_REPO_NAME", "airbus/scikit-decide"
    )
    binder_env_branch = os.environ.get("AUTODOC_BINDER_ENV_GH_BRANCH", "binder")
    # Notebooks repo: both variables are required for link creation.
    try:
        notebooks_repo_url = os.environ["AUTODOC_NOTEBOOKS_REPO_URL"]
        notebooks_branch = os.environ["AUTODOC_NOTEBOOKS_BRANCH"]
        creating_links = True
    except KeyError:
        # missing environment variables => no github and binder links creation
        notebooks_repo_url = ""
        notebooks_branch = ""
        creating_links = False
        logger.warning(
            "Missing environment variables AUTODOC_NOTEBOOKS_REPO_URL "
            "or AUTODOC_NOTEBOOKS_BRANCH to create github and binder links for notebooks."
        )
    return (
        creating_links,
        notebooks_repo_url,
        notebooks_branch,
        binder_env_repo_name,
        binder_env_branch,
    )
def extract_notebook_title_n_description(
    notebook_filepath: str,
) -> Tuple[str, List[str]]:
    """Extract a title and description from a Jupyter notebook.

    The title is taken from the h1 heading of the first cell when that cell
    is markdown, and the description from the remaining lines of that cell;
    the file name (without extension) is used as a fallback title.

    Robustness fix: notebooks with no cells, or whose first cell has an
    empty "source", previously raised IndexError; they now simply fall back
    to the file-name title with an empty description.
    """
    # load notebook
    with open(notebook_filepath, "rt") as f:
        notebook = json.load(f)

    title = ""
    description_lines: List[str] = []
    cells = notebook.get("cells") or []
    if cells:
        cell = cells[0]
        source = cell.get("source") or []
        if cell.get("cell_type") == "markdown" and source:
            if source[0].startswith("# "):
                title = source[0][2:].strip()
                description_lines = source[1:]
            else:
                description_lines = source
    if not title:
        # fallback: file name without extension
        title = os.path.splitext(os.path.basename(notebook_filepath))[0]
    return title, description_lines
if __name__ == "__main__":
docdir = os.path.dirname(os.path.abspath(__file__))
# ========== GATHER AUTODOC INFOS ==========
# Get all scikit-decide (sub)modules
modules = []
for m in find_abs_modules(skdecide):
try:
module = importlib.import_module(m)
modules.append(module)
except Exception as e:
print(f"Could not load module {m}, so it will be ignored ({e}).")
autodocs = []
for module in modules:
autodoc = {}
# Get module-level infos
autodoc["ref"] = get_ref(module)
doc = inspect.getdoc(module)
if doc is not None:
autodoc["doc"] = format_doc(doc)
members = inspect.getmembers(module, lambda x: inspect.getmodule(x) == module)
autodoc_members = []
for member_name, member in members:
member = inspect.unwrap(member)
# Get member-level infos
if getattr(member, "__doc__", None) is not None:
autodoc_member = {}
autodoc_member["name"] = member_name
add_basic_member_infos(member, autodoc_member)
if inspect.isfunction(member):
add_func_method_infos(member, autodoc_member)
elif inspect.isclass(member):
autodoc_member["type"] = "class"
autodoc_member["bases"] = (
list(map(get_ref, member.__bases__))
if member.__bases__ != (object,)
else None
)
autodoc_member["inheritance"] = list(
map(get_ref, inspect.getmro(member)[:-1])
)
submembers = inspect.getmembers(member)
autodoc_submembers = []
for submember_name, submember in submembers:
submember = inspect.unwrap(submember)
# Get class member-level infos
if (
submember_name == "__init__"
or not submember_name.startswith("__")
):
autodoc_submember = {}
autodoc_submember["name"] = (
submember_name
if submember_name != "__init__"
else member_name
)
add_basic_member_infos(submember, autodoc_submember)
# Find original owner class of this member (in class inheritance)
if submember_name == "__init__":
autodoc_submember["owner"] = member_name
else:
for cls in inspect.getmro(member):
if hasattr(cls, submember_name):
autodoc_submember["owner"] = cls.__name__
if (
inspect.isfunction(submember)
or inspect.ismethod(submember)
or submember_name == "__init__"
):
add_func_method_infos(submember, autodoc_submember)
else:
# Class variables (e.g. T_memory, T_agent...)
autodoc_submember["type"] = "variable"
if (
"doc" in autodoc_submember
or autodoc_submember.get("type") == "variable"
):
autodoc_submembers.append(autodoc_submember)
autodoc_member["members"] = sorted(
autodoc_submembers,
key=lambda x: x["line"] if "line" in x else 0,
)
if "doc" in autodoc_member:
autodoc_members.append(autodoc_member)
autodoc["members"] = sorted(
autodoc_members, key=lambda x: x["line"] if "line" in x else 0
)
autodocs.append(autodoc)
# ========== GENERATE MARKDOWN FILES ==========
# Remove all previously auto-generated files
for oldpath in (
glob(f"{docdir}/reference/_*.md")
+ glob(f"{docdir}/guide/_*.md")
+ glob(f"{docdir}/.vuepress/public/notebooks/*.ipynb")
):
os.remove(oldpath)
# Generate Reference Markdown files (reference/_skdecide.*.md)
os.makedirs(f"{docdir}/reference", exist_ok=True)
for module in autodocs:
# Initiate Markdown
md = ""
# Write module title
md += f'# {module["ref"].split(".", 1)[-1]}\n\n'
# Write module doc (if any)
if "doc" in module:
md += f'{module["doc"]}\n\n'
# Write Table Of Content
md += "[[toc]]\n\n"
# Write domain spec summary
md += "::: tip\n<skdecide-summary></skdecide-summary>\n:::\n\n"
# Write members
for member in module["members"]:
# Write member title
md += f'## {md_escape(member["name"])}\n\n'
# Write member signature (if any)
md = write_signature(md, member)
# Write member doc (if any)
if "doc" in member:
md += f'{doc_escape(member["doc"])}\n\n'
# Write submembers (if any)
if "members" in member:
for submember in sorted(
member["members"],
key=lambda x: (x["name"].startswith("_"), x["name"]),
):
if submember["type"] != "variable":
# Write submember title
md += (
f'### {md_escape(submember["name"]) if submember["name"] != member["name"] else "Constructor"}'
f' <Badge text="{submember["owner"]}" type="{"tip" if submember["owner"] == member["name"] else "warn"}"/>\n\n'
)
# Write submember signature (if any)
md = write_signature(md, submember)
# Write submember doc (if any)
if "doc" in submember:
md += f'{doc_escape(submember["doc"])}\n\n'
with open(f'{docdir}/reference/_{module["ref"]}.md', "w") as f:
f.write(md)
# Write Reference index (reference/README.md)
REF_INDEX_MAXDEPTH = 5
ref_entries = sorted(
[tuple(m["ref"].split(".")) for m in autodocs],
key=lambda x: [
x[i] if i < len(x) - 1 else "" for i in range(REF_INDEX_MAXDEPTH)
],
) # tree-sorted entries
ref_entries = filter(
lambda e: len(e) <= REF_INDEX_MAXDEPTH, ref_entries
) # filter out by max depth
ref_entries = [
{"text": e[-1], "link": ".".join(e), "section": e[:-1]} for e in ref_entries
] # organize entries
reference = ""
sections = set()
for e in ref_entries:
for i in range(1, len(e["section"]) + 1):
section = | |
name of the workgroup to delete.
:type RecursiveDeleteOption: boolean
:param RecursiveDeleteOption:
The option to delete the workgroup and its contents even if the workgroup contains any named queries.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Generate a presigned url given a client, its method, and arguments
    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to
        ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method\'s model.
    :returns: The presigned url
    """
    # NOTE(review): appears to be an auto-generated typing stub -- the body is
    # intentionally `pass`; the real behavior is provided elsewhere at runtime.
    pass
def get_named_query(self, NamedQueryId: str) -> Dict:
    """
    Returns information about a single query. Requires that you have access to the workgroup in which the query was saved.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery>`_
    **Request Syntax**
    ::
        response = client.get_named_query(
            NamedQueryId='string'
        )
    **Response Syntax**
    ::
        {
            'NamedQuery': {
                'Name': 'string',
                'Description': 'string',
                'Database': 'string',
                'QueryString': 'string',
                'NamedQueryId': 'string',
                'WorkGroup': 'string'
            }
        }
    **Response Structure**
    - *(dict) --*
      - **NamedQuery** *(dict) --*
        Information about the query.
        - **Name** *(string) --*
          The query name.
        - **Description** *(string) --*
          The query description.
        - **Database** *(string) --*
          The database to which the query belongs.
        - **QueryString** *(string) --*
          The SQL query statements that comprise the query.
        - **NamedQueryId** *(string) --*
          The unique identifier of the query.
        - **WorkGroup** *(string) --*
          The name of the workgroup that contains the named query.
    :type NamedQueryId: string
    :param NamedQueryId: **[REQUIRED]**
        The unique ID of the query. Use ListNamedQueries to get query IDs.
    :rtype: dict
    :returns:
    """
    # NOTE(review): appears to be an auto-generated typing stub -- the body is
    # intentionally `pass`; the real behavior is provided elsewhere at runtime.
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
    """
    Create a paginator for an operation.
    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is ``create_foo``, and you\'d normally invoke the
        operation as ``client.create_foo(**kwargs)``, if the
        ``create_foo`` operation can be paginated, you can use the
        call ``client.get_paginator(\"create_foo\")``.
    :raise OperationNotPageableError: Raised if the operation is not
        pageable. You can use the ``client.can_paginate`` method to
        check if an operation is pageable.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    # NOTE(review): appears to be an auto-generated typing stub -- the body is
    # intentionally `pass`; the real behavior is provided elsewhere at runtime.
    pass
def get_query_execution(self, QueryExecutionId: str) -> Dict:
    """
    Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution>`_
    **Request Syntax**
    ::
        response = client.get_query_execution(
            QueryExecutionId='string'
        )
    **Response Syntax**
    ::
        {
            'QueryExecution': {
                'QueryExecutionId': 'string',
                'Query': 'string',
                'StatementType': 'DDL'|'DML'|'UTILITY',
                'ResultConfiguration': {
                    'OutputLocation': 'string',
                    'EncryptionConfiguration': {
                        'EncryptionOption': 'SSE_S3'|'SSE_KMS'|'CSE_KMS',
                        'KmsKey': 'string'
                    }
                },
                'QueryExecutionContext': {
                    'Database': 'string'
                },
                'Status': {
                    'State': 'QUEUED'|'RUNNING'|'SUCCEEDED'|'FAILED'|'CANCELLED',
                    'StateChangeReason': 'string',
                    'SubmissionDateTime': datetime(2015, 1, 1),
                    'CompletionDateTime': datetime(2015, 1, 1)
                },
                'Statistics': {
                    'EngineExecutionTimeInMillis': 123,
                    'DataScannedInBytes': 123
                },
                'WorkGroup': 'string'
            }
        }
    **Response Structure**
    - *(dict) --*
      - **QueryExecution** *(dict) --*
        Information about the query execution.
        - **QueryExecutionId** *(string) --*
          The unique identifier for each query execution.
        - **Query** *(string) --*
          The SQL query statements which the query execution ran.
        - **StatementType** *(string) --*
          The type of query statement that was run. ``DDL`` indicates DDL query statements. ``DML`` indicates DML (Data Manipulation Language) query statements, such as ``CREATE TABLE AS SELECT`` . ``UTILITY`` indicates query statements other than DDL and DML, such as ``SHOW CREATE TABLE`` , or ``DESCRIBE <table>`` .
        - **ResultConfiguration** *(dict) --*
          The location in Amazon S3 where query results were stored and the encryption option, if any, used for query results. These are known as "client-side settings". If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup.
          - **OutputLocation** *(string) --*
            The location in Amazon S3 where your query results are stored, such as ``s3://path/to/query/bucket/`` . For more information, see `Queries and Query Result Files. <https://docs.aws.amazon.com/athena/latest/ug/querying.html>`__ If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The "workgroup settings override" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration .
          - **EncryptionConfiguration** *(dict) --*
            If query results are encrypted in Amazon S3, indicates the encryption option used (for example, ``SSE-KMS`` or ``CSE-KMS`` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and `Workgroup Settings Override Client-Side Settings <https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html>`__ .
            - **EncryptionOption** *(string) --*
              Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (``SSE-S3`` ), server-side encryption with KMS-managed keys (``SSE-KMS`` ), or client-side encryption with KMS-managed keys (CSE-KMS) is used.
              If a query runs in a workgroup and the workgroup overrides client-side settings, then the workgroup's setting for encryption is used. It specifies whether query results must be encrypted, for all queries that run in this workgroup.
            - **KmsKey** *(string) --*
              For ``SSE-KMS`` and ``CSE-KMS`` , this is the KMS key ARN or ID.
        - **QueryExecutionContext** *(dict) --*
          The database in which the query execution occurred.
          - **Database** *(string) --*
            The name of the database.
        - **Status** *(dict) --*
          The completion date, current state, submission time, and state change reason (if applicable) for the query execution.
          - **State** *(string) --*
            The state of query execution. ``QUEUED`` state is listed but is not used by Athena and is reserved for future use. ``RUNNING`` indicates that the query has been submitted to the service, and Athena will execute the query as soon as resources are available. ``SUCCEEDED`` indicates that the query completed without errors. ``FAILED`` indicates that the query experienced an error and did not complete processing. ``CANCELLED`` indicates that a user input interrupted query execution.
          - **StateChangeReason** *(string) --*
            Further detail about the status of the query.
          - **SubmissionDateTime** *(datetime) --*
            The date and time that the query was submitted.
          - **CompletionDateTime** *(datetime) --*
            The date and time that the query completed.
        - **Statistics** *(dict) --*
          The amount of data scanned during the query execution and the amount of time that it took to execute, and the type of statement that was run.
          - **EngineExecutionTimeInMillis** *(integer) --*
            The number of milliseconds that the query took to execute.
          - **DataScannedInBytes** *(integer) --*
            The number of bytes in the data that was queried.
        - **WorkGroup** *(string) --*
          The name of the workgroup in which the query ran.
    :type QueryExecutionId: string
    :param QueryExecutionId: **[REQUIRED]**
        The unique ID of the query execution.
    :rtype: dict
    :returns:
    """
    # NOTE(review): appears to be an auto-generated typing stub -- the body is
    # intentionally `pass`; the real behavior is provided elsewhere at runtime.
    pass
def get_query_results(self, QueryExecutionId: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Returns the results of a single query execution specified by ``QueryExecutionId`` if you have access to the workgroup in which the query ran. This request does not execute the query but returns results. Use StartQueryExecution to run a query.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults>`_
**Request Syntax**
::
response = client.get_query_results(
QueryExecutionId='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'UpdateCount': 123,
'ResultSet': {
'Rows': [
{
'Data': [
{
'VarCharValue': 'string'
},
]
},
],
'ResultSetMetadata': {
'ColumnInfo': [
{
'CatalogName': 'string',
'SchemaName': 'string',
'TableName': 'string',
'Name': 'string',
'Label': 'string',
'Type': 'string',
'Precision': 123,
'Scale': 123,
'Nullable': | |
<filename>pytwitcasting/api.py
import json
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from pytwitcasting.error import TwitcastingException
from pytwitcasting.parsers import ModelParser
from pprint import pprint
# Base URL of the Twitcasting v2 API.
API_BASE_URL = 'https://apiv2.twitcasting.tv'
# HTTP status codes that should trigger a retry.
# Fix: "(500)" is just the int 500 -- the trailing comma makes it a 1-tuple,
# matching the plural name and allowing `status in STATUS_CODES_TO_RETRY`.
STATUS_CODES_TO_RETRY = (500,)
def _requests_retry_session(retries=3,
                            backoff_factor=0.3,
                            status_forcelist=(500, 502, 504),
                            session=None):
    """Create (or configure) a requests session that retries failed requests.

    :param retries: max number of retries (applied to total/read/connect)
    :param backoff_factor: exponential backoff factor between retry attempts
    :param status_forcelist: HTTP status codes that force a retry
    :param session: existing session to configure; a new one is created if None
    :return: the configured session
    """
    session = session or requests.Session()
    # Retry policy object, handed to the adapter via max_retries.
    retry = Retry(total=retries,
                  read=retries,
                  connect=retries,
                  backoff_factor=backoff_factor,
                  status_forcelist=status_forcelist)
    # urllib3's built-in HTTP adapter, with the retry policy attached.
    adapter = HTTPAdapter(max_retries=retry)
    # Mount the adapter for all https:// URLs.
    session.mount('https://', adapter)
    return session
class API(object):
""" APIにアクセスする """
def __init__(self, access_token=None, requests_session=True, application_basis=None,
             accept_encoding=False, requests_timeout=None):
    """
    :param access_token: access token
    :type access_token: str
    :param requests_session: a session object, or whether to use a session
    :type requests_session: :class:`requests.Session <requests.Session>` or bool
    :param application_basis: (optional) TwitcastingApplicationBasis object
    :type application_basis: :class:`TwitcastingApplicationBasis <pytwitcasting.auth.TwitcastingApplicationBasis>`
    :param accept_encoding: (optional) whether to request gzip compression for large responses
    :type accept_encoding: bool
    :param requests_timeout: (optional) request timeout in seconds
    :type requests_timeout: int or float
    """
    self._access_token = access_token
    self.application_basis = application_basis
    self.accept_encoding = accept_encoding
    self.requests_timeout = requests_timeout
    if isinstance(requests_session, requests.Session):
        # A Session object was passed in: use it as-is.
        session = requests_session
    else:
        if requests_session is True:
            # Create a new session.
            session = requests.Session()
        else:
            # Effectively sessionless: the requests.api module exposes the
            # same request() interface but opens/closes a session per call.
            from requests import api
            session = api
    # Wrap with the retry-enabled session.
    self._session = _requests_retry_session(session=session)
def _auth_headers(self):
    """Return HTTP headers carrying the authorization information.

    :return: dict with a Bearer token, or Basic-auth headers from the
             application basis, or an empty dict when no credentials are set
    """
    if self._access_token:
        return {'Authorization': f'Bearer {self._access_token}'}
    elif self.application_basis:
        return self.application_basis.get_basic_headers()
    else:
        return {}
def _internal_call(self, method, url, payload, json_data, params):
    """Send a request to the API and decode the response.

    :param method: HTTP method name
    :param url: absolute URL, or path relative to the API base URL
    :param payload: data to send JSON-serialized as the POST body
    :param json_data: data to pass through requests' ``json=`` parameter
    :param params: dict of query-string parameters
    :return: decoded JSON dict, an image payload dict (bytes + extension),
             or None for empty/'null' responses
    :raises TwitcastingException: on any HTTP error status
    """
    if not url.startswith('http'):
        url = API_BASE_URL + url
    args = dict(params=params)
    args['timeout'] = self.requests_timeout
    if payload:
        args['data'] = json.dumps(payload)
    if json_data:
        args['json'] = json_data
    # TODO: decide how timeouts should really be handled
    headers = self._auth_headers()
    headers['X-Api-Version'] = '2.0'
    headers['Accept'] = 'application/json'
    if self.accept_encoding:
        headers['Accept-Encoding'] = 'gzip'
    # The session performs retries automatically.
    r = self._session.request(method, url, headers=headers, **args)
    try:
        r.raise_for_status()
    except:
        # len(None) would raise TypeError, so check r.text first.
        if r.text and r.text != 'null':
            err = r.json()['error']
            # Depending on the error, 'details' may or may not be present.
            if 'details' in err:
                details = f"\n {err['details']}"
            else:
                details = ''
            raise TwitcastingException(r.status_code, err['code'], f"{r.url}:\n {err['message']}{details}")
        else:
            raise TwitcastingException(r.status_code, -1, f'{r.url}:\n error')
    finally:
        # Close the response just in case.
        r.close()
    if r.text and r.text != 'null':
        if r.headers['Content-Type'] in ['image/jpeg', 'image/png']:
            # Derive the file extension from the image content type.
            file_ext = r.headers['Content-Type'].replace('image/', '')
            ret = {'bytes_data': r.content,
                   'file_ext': file_ext}
            return ret
        else:
            return r.json()
    else:
        return None
def _get(self, url, args=None, payload=None, **kwargs):
    """Send a GET request; extra kwargs become query-string parameters."""
    if args:
        kwargs.update(args)
    return self._internal_call('GET', url, payload, None, kwargs)
def _post(self, url, args=None, payload=None, json_data=None, **kwargs):
    """Send a POST request; extra kwargs become query-string parameters."""
    if args:
        kwargs.update(args)
    return self._internal_call('POST', url, payload, json_data, kwargs)
def _del(self, url, args=None, payload=None, **kwargs):
    """Send a DELETE request; extra kwargs become query-string parameters."""
    if args:
        kwargs.update(args)
    return self._internal_call('DELETE', url, payload, None, kwargs)
def _put(self, url, args=None, payload=None, **kwargs):
    """Send a PUT request; extra kwargs become query-string parameters."""
    if args:
        kwargs.update(args)
    return self._internal_call('PUT', url, payload, None, kwargs)
def get_user_info(self, user_id):
    """Get User Info — fetch a user's profile.

    Required permission: Read

    :calls: `GET /users/:user_id <http://apiv2-doc.twitcasting.tv/#get-user-info>`_
    :param user_id: the user's id or screen_id
    :type user_id: str
    :return: :class:`User <pytwitcasting.models.User>`
    :rtype: :class:`User <pytwitcasting.models.User>`
    """
    response = self._get(f'/users/{user_id}')
    return ModelParser().parse(self, response['user'], parse_type='user', payload_list=False)
def get_movie_info(self, movie_id):
    """Fetch a movie's info together with its broadcaster.

    :calls: `GET /movies/:movie_id`
    :param movie_id: the movie's id
    :return: dict whose ``movie`` and ``broadcaster`` entries are parsed models
    :rtype: dict
    """
    response = self._get(f'/movies/{movie_id}')
    model_parser = ModelParser()
    for key, parse_type in (('movie', 'movie'), ('broadcaster', 'user')):
        response[key] = model_parser.parse(self, payload=response[key],
                                           parse_type=parse_type, payload_list=False)
    return response
def verify_credentials(self):
    """Verify Credentials — validate the access token and fetch its owner.

    Required permission: Read
    NOTE: errors unless the token came from Authorization Code Grant or Implicit.

    :calls: `GET /verify_credentials <http://apiv2-doc.twitcasting.tv/#verify-credentials>`_
    :return: - ``app`` : :class:`App <pytwitcasting.models.App>` tied to the token
             - ``user`` : :class:`User <pytwitcasting.models.User>` tied to the token
    :rtype: dict
    """
    response = self._get('/verify_credentials')
    model_parser = ModelParser()
    for key in ('app', 'user'):
        response[key] = model_parser.parse(self, payload=response[key],
                                           parse_type=key, payload_list=False)
    return response
def support_user(self, target_user_ids):
    """Support User — become a supporter of the given users.

    Required permission: Write

    :calls: `PUT /support <http://apiv2-doc.twitcasting.tv/#support-user>`_
    :param target_user_ids: ids/screen_ids to support (up to 20 per call)
    :type target_user_ids: list[str]
    :return: number of users newly supported, or None on an empty response
    """
    # sent as the request body
    res = self._put('/support', payload={'target_user_ids': target_user_ids})
    if res:
        return res['added_count']
    return None
def unsupport_user(self, target_user_ids):
    """Unsupport User — stop being a supporter of the given users.

    Required permission: Write

    :calls: `PUT /unsupport <http://apiv2-doc.twitcasting.tv/#unsupport-user>`_
    :param target_user_ids: ids/screen_ids to unsupport (up to 20 per call)
    :type target_user_ids: list[str]
    :return: number of supporter registrations removed, or None on an empty response
    """
    # sent as the request body
    res = self._put('/unsupport', payload={'target_user_ids': target_user_ids})
    if res:
        return res['removed_count']
    return None
def get_categories(self, lang='ja'):
    """Get Categories — list only the categories that currently have live broadcasts.

    Required permission: Read

    :calls: `GET /categories <http://apiv2-doc.twitcasting.tv/#get-categories>`_
    :param lang: (optional) search language, ``ja`` or ``en``
    :type lang: str
    :return: list of :class:`Category <pytwitcasting.models.Category>`
    :rtype: list[ :class:`Category <pytwitcasting.models.Category>` ]
    """
    response = self._get('/categories', lang=lang)
    return ModelParser().parse(self, response['categories'],
                               parse_type='category', payload_list=True)
def search_users(self, words, limit=10, lang='ja'):
    """Search Users.

    Required permission: Read

    :calls: `GET /search/users <http://apiv2-doc.twitcasting.tv/#search-users>`_
    :param words: words to AND-search for
    :type words: list[str]
    :param limit: (optional) number of results, min ``1``, max ``50``
    :type limit: int
    :param lang: (optional) target user language; currently only ``'ja'`` is supported
    :type lang: str
    :return: list of :class:`User <pytwitcasting.models.User>`
    :rtype: list[ :class:`User <pytwitcasting.models.User>` ]
    """
    if isinstance(words, list):
        # multiple words are joined with spaces (urlencoded to '+' on send)
        query = words[0] if len(words) <= 1 else ' '.join(words)
    else:
        query = words
    response = self._get('/search/users', words=query, limit=limit, lang=lang)
    return ModelParser().parse(self, payload=response['users'],
                               parse_type='user', payload_list=True)
def search_live_movies(self, search_type='new', context=None, limit=10, lang='ja'):
    """Search Live Movies — search currently-live broadcasts.

    Required permission: Read

    :calls: `GET /search/lives <http://apiv2-doc.twitcasting.tv/#search-live-movies>`_
    :param search_type: (optional) one of ``tag``, ``word``, ``category``, ``new``, ``recommend``
    :type search_type: str
    :param context: (optional) search terms; meaning depends on ``search_type`` (see Usage)
    :type context: list[str] or str or None
    :param limit: (optional) number of results, min ``1``, max ``100``
    :type limit: int
    :param lang: (optional) target user language; currently only ``ja``
    :type lang: str
    :return: list of :class:`Movie <pytwitcasting.models.Movie>`
    :rtype: list[ :class:`Movie <pytwitcasting.models.Movie>` ]
    Usage::
        # ex1) search_type='tag' (context required)
        >>> movies = api.search_live_movies(search_type='tag', context=['人気', '雑談'])
        # ex2) search_type='word' (context required)
        >>> movies = api.search_live_movies(search_type='word', context=['ツイキャス', 'ゲーム'])
        # ex3) search_type='category' (context required):
        # pass the `id` of a SubCategory from API.get_categories()
        >>> movies = api.search_live_movies(search_type='category', context='hobby_game_boys_jp')
        # ex4) search_type='new' (no context)
        >>> movies = api.search_live_movies(search_type='new')
        # ex5) search_type='recommend' (no context)
        >>> movies = api.search_live_movies(search_type='recommend')
    """
    params = {'type': search_type, 'limit': limit, 'lang': lang}
    if search_type and context:
        if search_type in ('tag', 'word'):
            # query params are urlencoded (' ' becomes '+'), so just join terms
            # with spaces and let the transport encode them
            if isinstance(context, list):
                params['context'] = context[0] if len(context) <= 1 else ' '.join(context)
            else:
                params['context'] = context
        elif search_type == 'category':
            params['context'] = context
        # 'new' and 'recommend' take no context
    response = self._get('/search/lives', args=params)
    model_parser = ModelParser()
    for live in response['movies']:
        live['movie'] = model_parser.parse(self, payload=live['movie'],
                                           parse_type='movie', payload_list=False)
        live['broadcaster'] = model_parser.parse(self, payload=live['broadcaster'],
                                                 parse_type='user', payload_list=False)
    return response
def get_webhook_list(self, limit=50, offset=0, user_id=None):
    """Get WebHook List — list the WebHooks registered to this application.

    *Only callable per-application (Basic auth).*
    Required permission: any

    :calls: `GET /webhooks <http://apiv2-doc.twitcasting.tv/#get-webhook-list>`_
    :param limit: number of results, min ``1``, max ``100``
    :type limit: int
    :param offset: position from the start, min ``0``
    :type offset: int
    :param user_id: target user's id or screen_id; ``limit`` and ``offset``
                    only apply when ``user_id`` is ``None``
    :type user_id: str or None
    :return: - ``all_count`` : number of WebHooks registered to this app
             - ``webhooks`` : list of WebHooks
    :rtype: dict
    """
    if user_id:
        params = {'user_id': user_id}
    else:
        params = {'limit': limit, 'offset': offset}
    response = self._get('/webhooks', args=params)
    response['webhooks'] = ModelParser().parse(self, payload=response['webhooks'],
                                               parse_type='webhook', payload_list=True)
    return response
def register_webhook(self, user_id, events):
    """Register WebHook.

    Requires a WebHook URL to be registered for the application.
    Required permission: any

    :calls: `POST /webhooks <http://apiv2-doc.twitcasting.tv/#register-webhook>`_
    :param user_id: target user's id
    :type user_id: str
    :param events: event kinds to hook: ``livestart`` or ``liveend``
    :type events: list[str]
    :return: - ``user_id`` : id of the registered user
             - ``added_events`` : list of registered event kinds
    :rtype: dict
    """
    # passed through untouched as the JSON body
    return self._post('/webhooks', json_data={'user_id': user_id, 'events': events})
def remove_webhook(self, user_id, events):
    """Remove WebHook.

    Required permission: any

    :calls: `DELETE /webhooks <http://apiv2-doc.twitcasting.tv/#remove-webhook>`_
    :param user_id: target user's id
    :type user_id: str
    :param events: event kinds to unhook: ``livestart`` or ``liveend``
    :type events: list[str]
    :return: - ``user_id`` : id of the user
             - ``removed_events`` : list of removed event kinds
    :rtype: dict
    """
    # events use the 'events[]' array-parameter form in the query string
    return self._del('/webhooks', args={'user_id': user_id, 'events[]': events})
def get_rtmp_url(self):
    """Get RTMP Url — broadcasting URL (RTMP) of the token's user.

    Required permission: *Broadcast*

    :calls: `GET /rtmp_url <http://apiv2-doc.twitcasting.tv/#get-rtmp-url>`_
    :return: - ``enabled`` : whether RTMP broadcasting is enabled
             - ``url`` : RTMP broadcast URL
             - ``stream_key`` : RTMP stream key
    :rtype: dict
    """
    return self._get('/rtmp_url')
def get_webm_url(self):
    """Get WebM Url — broadcasting URL (WebM over WebSocket) of the token's user.

    Required permission: *Broadcast*

    :calls: `GET /webm_url <http://apiv2-doc.twitcasting.tv/#get-webm-url>`_
    :return: - ``enabled`` : whether WebM broadcasting is enabled
             - ``url`` : WebM broadcast URL
    :rtype: dict
    """
    return self._get('/webm_url')
def _get_live_thumbnail_image(self, user_id, size='small', position='latest'):
    """Fetch the live-thumbnail image of a user's current broadcast.

    :param user_id: target user's id or screen_id
    :param size: thumbnail size ('small' by default)
    :param position: which frame to grab ('latest' by default)
    :return: decoded API response (binary image payload)
    """
    return self._get(f'/users/{user_id}/live/thumbnail',
                     size=size, position=position)
def _get_movies_by_user(self, user_id, offset=0, limit=20):
    """Fetch a page of a user's movies, parsing each into a Movie model.

    :param user_id: target user's id or screen_id
    :param offset: position from the start of the list
    :param limit: page size
    :return: API response with ``movies`` replaced by parsed Movie objects
    """
    response = self._get(f'/users/{user_id}/movies', offset=offset, limit=limit)
    # turn the raw array into an array of Movie models
    response['movies'] = ModelParser().parse(self, response['movies'],
                                             parse_type='movie', payload_list=True)
    return response
def _get_current_live(self, user_id):
    """Fetch a user's currently-running live.

    # TODO: confirm whether an API error is the right outcome when the
    # user is not live.

    :param user_id: target user's id or screen_id
    :return: API response with ``movie`` and ``broadcaster`` parsed into models
    """
    response = self._get(f'/users/{user_id}/current_live')
    model_parser = ModelParser()
    for key, parse_type in (('movie', 'movie'), ('broadcaster', 'user')):
        response[key] = model_parser.parse(self, response[key],
                                           parse_type=parse_type, payload_list=False)
    return response
def _get_comments(self, movie_id, offset=0, limit=10, slice_id=None):
    """Fetch comments on a movie, optionally resuming after slice_id.

    :param movie_id: target movie id
    :param offset: position from the start of the comment list
    :param limit: page size
    :param slice_id: when set, fetch comments newer than this id
    :return: API response with ``comments`` parsed into Comment models
    """
    params = {'offset': offset, 'limit': limit}
    if slice_id:
        params['slice_id'] = slice_id
    response = self._get(f'/movies/{movie_id}/comments', args=params)
    response['comments'] = ModelParser().parse(self, response['comments'],
                                               parse_type='comment', payload_list=True)
    return response
def _post_comment(self, movie_id, comment, sns='none'):
data = {'comment': comment, 'sns': | |
# Module containing functions that interact with the statline API
from typing import Union
from pathlib import Path
import requests
import xml.etree.ElementTree as ET
import pyarrow as pa
import dask.bag as db
from statline_bq.utils import _create_dir, _url_to_ndjson, convert_ndjsons_to_parquet
from statline_bq.log import logdec
@logdec
def _check_v4(id: str, third_party: bool = False) -> str:
    """Check whether a CBS table is available as OData version "v4".

    Parameters
    ----------
    id: str
        CBS Dataset id, i.e. "83583NED"
    third_party: bool, default=False
        Flag to indicate dataset is not originally from CBS. Set to true
        to use dataderden.cbs.nl as base url (not available in v4 yet).

    Returns
    -------
    odata_version: str
        "v4" if it exists as odata v4, "v3" otherwise.
    """
    # dataderden.cbs.nl does not implement v4
    if third_party:
        return "v3"
    base_url = {
        True: None,  # currently no IV3 links in ODATA V4
        False: f"https://odata4.cbs.nl/CBS/{id}",
    }
    response = requests.get(base_url[third_party])
    # TODO: Is a 200 check the best test to use? Maybe "not 404"? Or something else?
    return "v4" if response.status_code == 200 else "v3"
@logdec
def _get_urls(
    id: str, odata_version: str, third_party: bool = False
) -> dict:  # TODO: Rename to get_dataset_urls (contrast with get_table_urls)
    """Return a dict with the urls of all tables of a valid CBS dataset id.

    Parameters
    ----------
    id: str
        CBS Dataset id, i.e. "83583NED"
    odata_version: str
        version of the odata for this dataset - must be either "v3" or "v4".
    third_party: bool, default=False
        Flag to indicate dataset is not originally from CBS. Set to true
        to use dataderden.cbs.nl as base url (not available in v4 yet).

    Returns:
    urls: dict of str
        A dict containing all urls of a CBS dataset's tables

    Examples:
        >>> dataset_id = '83583NED'
        >>> urls = get_urls(id=dataset_id, odata_version="v3", third_party=False)
        >>> for name, url in urls.items():
        ...     print(f"{name}: {url}")
        TableInfos: https://opendata.cbs.nl/ODataFeed/odata/83583NED/TableInfos
        UntypedDataSet: https://opendata.cbs.nl/ODataFeed/odata/83583NED/UntypedDataSet
        TypedDataSet: https://opendata.cbs.nl/ODataFeed/odata/83583NED/TypedDataSet
        DataProperties: https://opendata.cbs.nl/ODataFeed/odata/83583NED/DataProperties
        CategoryGroups: https://opendata.cbs.nl/ODataFeed/odata/83583NED/CategoryGroups
        BedrijfstakkenBranchesSBI2008: https://opendata.cbs.nl/ODataFeed/odata/83583NED/BedrijfstakkenBranchesSBI2008
        Bedrijfsgrootte: https://opendata.cbs.nl/ODataFeed/odata/83583NED/Bedrijfsgrootte
        Perioden: https://opendata.cbs.nl/ODataFeed/odata/83583NED/Perioden
    """
    if odata_version == "v4":
        base_url = {
            True: None,  # currently no IV3 links in ODATA V4
            False: f"https://odata4.cbs.nl/CBS/{id}",
        }
        catalog = requests.get(base_url[third_party]).json()["value"]
        # v4 entries hold relative urls; prefix them with the base
        urls = {entry["name"]: base_url[third_party] + "/" + entry["url"] for entry in catalog}
    elif odata_version == "v3":
        base_url = {
            True: f"https://dataderden.cbs.nl/ODataFeed/odata/{id}?$format=json",
            False: f"https://opendata.cbs.nl/ODataFeed/odata/{id}?$format=json",
        }
        catalog = requests.get(base_url[third_party]).json()["value"]
        # v3 entries already hold absolute urls
        urls = {entry["name"]: entry["url"] for entry in catalog}
    else:
        raise ValueError("odata version must be either 'v3' or 'v4'")
    return urls
@logdec
def get_metadata_cbs(id: str, third_party: bool, odata_version: str) -> dict:
    """Retrieves a dataset's metadata from cbs.

    Parameters
    ----------
    id: str
        CBS Dataset id, i.e. "83583NED"
    third_party: bool
        Flag to indicate dataset is not originally from CBS. Set to true
        to use dataderden.cbs.nl as base url (not available in v4 yet).
    odata_version : str
        The version of the OData for this dataset - should be "v3" or "v4".

    Returns
    -------
    dict
        The dataset's metadata.

    Raises
    ------
    ValueError
        If odata_version is not "v3" or "v4", if a v4 third-party dataset is
        requested, or if the v3 catalog unexpectedly returns several entries
        for one Identifier.
    KeyError
        If the dataset ID is not found in the v3 catalog.
    """
    catalog_urls = {
        ("v3", True): "https://dataderden.cbs.nl/ODataCatalog/Tables?$format=json",
        ("v3", False): "https://opendata.cbs.nl/ODataCatalog/Tables?$format=json",
        ("v4", False): f"https://odata4.cbs.nl/CBS/{id}/properties",
    }
    if odata_version == "v3":
        url = catalog_urls[(odata_version, third_party)]
        params = {"$filter": f"Identifier eq '{id}'"}
        tables = requests.get(url, params=params).json()["value"]
        if not tables:
            raise KeyError(
                "Dataset ID not found. Please enter a valid ID, and ensure third_party is set appropriately"
            )
        if len(tables) > 1:
            # More than one catalog entry for the same Identifier is unlikely
            # and suggests a bug (or catalog inconsistency). Previously this
            # branch silently passed, leaving `meta` unbound and crashing
            # later with UnboundLocalError; fail explicitly instead.
            raise ValueError(
                f"Multiple catalog entries found for dataset id '{id}' - expected exactly one"
            )
        meta = tables[0]
    elif odata_version == "v4":
        if third_party:
            # TODO: HANDLE PROPERLY - is ValueError appropriate here?
            raise ValueError(
                "Third party datasets (IV3) using odata version v4 are not yet implemented in CBS."
            )
        meta = requests.get(catalog_urls[(odata_version, third_party)]).json()
    else:
        raise ValueError("odata version must be either 'v3' or 'v4'")
    return meta
@logdec
def _get_main_table_shape(metadata: dict) -> dict:
    """Read a CBS dataset's metadata and return the main table's shape.

    - For v3 odata, n_records and n_columns exist in the metadata.
    - For v4 odata, n_observations exists in the metadata.
    All three keys are always present in the result; missing values are None.

    Parameters
    ----------
    metadata : dict
        The dataset's metadata

    Returns
    -------
    dict
        The dataset's main table's shape
    """
    field_map = (
        ("n_records", "RecordCount"),
        ("n_columns", "ColumnCount"),
        ("n_observations", "ObservationCount"),
    )
    return {out_key: metadata.get(meta_key) for out_key, meta_key in field_map}
@logdec
def _generate_table_urls(base_url: str, n_records: int, odata_version: str) -> list:
    """Create a list of urls for parallel fetching of a table.

    Appends "$skip={i}" query parameters to the base url, where i runs over
    multiples of 10,000 (v3) or 100,000 (v4), so that each generated url
    covers the next page of the table's rows.

    Parameters
    ----------
    base_url : str
        The base url for the table.
    n_records : int
        The amount of rows(=records/observations) in the table.
    odata_version : str
        version of the odata for this dataset - must be either "v3" or "v4".

    Returns
    -------
    table_urls : list of str
        A list holding all urls needed to fetch the full table data.
    """
    # Only the main table exceeds the per-request limit; other tables pass None
    if n_records is None:
        return [base_url]
    # v3 urls already carry "?$format=json", so v3 appends with "&", v4 with "?"
    connector = {"v3": "&", "v4": "?"}[odata_version]
    cbs_limit = {"v3": 10000, "v4": 100000}[odata_version]
    zeros = "0" * {"v3": 4, "v4": 5}[odata_version]
    table_urls = [base_url]
    for page in range(n_records // cbs_limit):
        table_urls.append(f"{base_url}{connector}$skip={page + 1}{zeros}")
    return table_urls
@logdec
def _get_schema_cbs(metadata_url, odata_version) -> pa.Schema:
    """Return a pyarrow.Schema for the main table of a cbs dataset given its base metadata url.

    Parameters
    ----------
    metadata_url : str
        A url containing the metadata of the dataset
    odata_version : str
        The version of the OData for this dataset - should be "v3" or "v4".

    Returns
    -------
    schema : pa.Schema
        A pyarrow Schema object for the main table of the dataset
    """
    # TODO complete full list
    # odata.types taken from: http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/part3-csdl/odata-v4.0-errata03-os-part3-csdl-complete.html#Picture 1:~:text=4.4%20Primitive%20Types,-Structured
    # pyarrow types taken from: https://arrow.apache.org/docs/python/api/datatypes.html
    # Unknown odata types fall back to pa.string() below. Still unmapped:
    # Edm.Date, Edm.DateTimeOffset, Edm.Decimal (needs precision/scale facets),
    # Edm.Duration, Edm.Guid, Edm.Stream, Edm.TimeOfDay and the
    # Edm.Geography*/Edm.Geometry* spatial types.
    odata_to_pa_hash = {
        "Edm.Binary": pa.binary(),
        "Edm.Boolean": pa.bool_(),
        "Edm.Byte": pa.int8(),
        "Edm.Double": pa.float64(),
        "Edm.Int16": pa.int16(),
        "Edm.Int32": pa.int32(),
        "Edm.Int64": pa.int64(),
        "Edm.SByte": pa.int8(),
        "Edm.Single": pa.float32(),
        "Edm.String": pa.string(),
    }
    response = requests.get(metadata_url)
    root = ET.fromstring(response.content)
    tdata = root.find(".//*[@Name='TData']")
    # TODO: Add property facets for relevant types
    # see http://docs.oasis-open.org/odata/odata/v4.0/errata03/os/complete/part3-csdl/odata-v4.0-errata03-os-part3-csdl-complete.html#Picture 1:~:text=6.2%20Property%20Facets,-Property
    named_types = [
        (node.attrib["Name"], node.attrib["Type"])
        for node in tdata.iter()
        if "Type" in node.attrib
    ]
    schema = pa.schema(
        [(name, odata_to_pa_hash.get(edm_type, pa.string())) for name, edm_type in named_types]
    )
    return schema
@logdec
def _get_column_descriptions(urls: dict, odata_version: str) -> dict:
"""Gets the column descriptions from CBS.
Wrapper function to call the correct version function which in turn gets
the dataset description according to the odata version: "v3" or "v4".
Parameters
----------
urls: dict
Dictionary holding urls of the dataset from CBS.
NOTE: urls["????????"] (for v4) or urls["DataProperties"] (for v3)
must be present in order to access the dataset description. #TODO: - Only implemented for V3. Implementation might differ for v4
odata_version: str
version of the odata for this dataset - must be either "v3" or "v4".
Returns
-------
dict
dict holding all coloumn descriptions for the dataset's main table
Raises
------
ValueError
If odata_version is not "v3" or "v4"
"""
if odata_version.lower() == "v4":
# Since odata v4 comes in a long format, this seems irrelevant #TODO: Verify
# column_descriptions | |
error : Error
instance_types : typing.Sequence[~InstanceType]
'''
cost_currency_ = cost_currency
cost_divisor_ = cost_divisor
cost_unit_ = cost_unit
error_ = Error.from_json(error) if error else None
instance_types_ = [InstanceType.from_json(o) for o in instance_types or []]
# Validate arguments against known Juju API types.
if cost_currency_ is not None and not isinstance(cost_currency_, (bytes, str)):
raise Exception("Expected cost_currency_ to be a str, received: {}".format(type(cost_currency_)))
if cost_divisor_ is not None and not isinstance(cost_divisor_, int):
raise Exception("Expected cost_divisor_ to be a int, received: {}".format(type(cost_divisor_)))
if cost_unit_ is not None and not isinstance(cost_unit_, (bytes, str)):
raise Exception("Expected cost_unit_ to be a str, received: {}".format(type(cost_unit_)))
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if instance_types_ is not None and not isinstance(instance_types_, (bytes, str, list)):
raise Exception("Expected instance_types_ to be a Sequence, received: {}".format(type(instance_types_)))
self.cost_currency = cost_currency_
self.cost_divisor = cost_divisor_
self.cost_unit = cost_unit_
self.error = error_
self.instance_types = instance_types_
self.unknown_fields = unknown_fields
class InstanceTypesResults(Type):
    # Auto-generated wrapper for the Juju API 'InstanceTypesResults' wire type.
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''Batch of InstanceTypesResult entries from a bulk API call.

        results : typing.Sequence[~InstanceTypesResult]
        '''
        results_ = [InstanceTypesResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class InstancesInfo(Type):
    # Auto-generated wrapper for the Juju API 'InstancesInfo' wire type.
    _toSchema = {'machines': 'machines'}
    _toPy = {'machines': 'machines'}
    def __init__(self, machines=None, **unknown_fields):
        '''Collection of per-machine instance information.

        machines : typing.Sequence[~InstanceInfo]
        '''
        machines_ = [InstanceInfo.from_json(o) for o in machines or []]
        # Validate arguments against known Juju API types.
        if machines_ is not None and not isinstance(machines_, (bytes, str, list)):
            raise Exception("Expected machines_ to be a Sequence, received: {}".format(type(machines_)))
        self.machines = machines_
        self.unknown_fields = unknown_fields
class IntResult(Type):
    # Auto-generated wrapper for the Juju API 'IntResult' wire type.
    _toSchema = {'error': 'error', 'result': 'result'}
    _toPy = {'error': 'error', 'result': 'result'}
    def __init__(self, error=None, result=None, **unknown_fields):
        '''A single int result, or the error that prevented it.

        error : Error
        result : int
        '''
        error_ = Error.from_json(error) if error else None
        result_ = result
        # Validate arguments against known Juju API types.
        if error_ is not None and not isinstance(error_, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
        if result_ is not None and not isinstance(result_, int):
            raise Exception("Expected result_ to be a int, received: {}".format(type(result_)))
        self.error = error_
        self.result = result_
        self.unknown_fields = unknown_fields
class IntResults(Type):
    # Auto-generated wrapper for the Juju API 'IntResults' wire type.
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''Batch of IntResult entries from a bulk API call.

        results : typing.Sequence[~IntResult]
        '''
        results_ = [IntResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class InterfaceAddress(Type):
    # Auto-generated wrapper for the Juju API 'InterfaceAddress' wire type.
    _toSchema = {'cidr': 'cidr', 'hostname': 'hostname', 'value': 'value'}
    _toPy = {'cidr': 'cidr', 'hostname': 'hostname', 'value': 'value'}
    def __init__(self, cidr=None, hostname=None, value=None, **unknown_fields):
        '''Address assigned to a network interface.

        cidr : str
        hostname : str
        value : str
        '''
        cidr_ = cidr
        hostname_ = hostname
        value_ = value
        # Validate arguments against known Juju API types.
        if cidr_ is not None and not isinstance(cidr_, (bytes, str)):
            raise Exception("Expected cidr_ to be a str, received: {}".format(type(cidr_)))
        if hostname_ is not None and not isinstance(hostname_, (bytes, str)):
            raise Exception("Expected hostname_ to be a str, received: {}".format(type(hostname_)))
        if value_ is not None and not isinstance(value_, (bytes, str)):
            raise Exception("Expected value_ to be a str, received: {}".format(type(value_)))
        self.cidr = cidr_
        self.hostname = hostname_
        self.value = value_
        self.unknown_fields = unknown_fields
class InvalidateCredentialArg(Type):
    # Auto-generated wrapper for the Juju API 'InvalidateCredentialArg' wire type.
    _toSchema = {'reason': 'reason'}
    _toPy = {'reason': 'reason'}
    def __init__(self, reason=None, **unknown_fields):
        '''Reason given when invalidating a cloud credential.

        reason : str
        '''
        reason_ = reason
        # Validate arguments against known Juju API types.
        if reason_ is not None and not isinstance(reason_, (bytes, str)):
            raise Exception("Expected reason_ to be a str, received: {}".format(type(reason_)))
        self.reason = reason_
        self.unknown_fields = unknown_fields
class IsMasterResult(Type):
    # Auto-generated wrapper for the Juju API 'IsMasterResult' wire type.
    _toSchema = {'master': 'master'}
    _toPy = {'master': 'master'}
    def __init__(self, master=None, **unknown_fields):
        '''Boolean flag reporting whether the queried entity is the master.

        master : bool
        '''
        master_ = master
        # Validate arguments against known Juju API types.
        if master_ is not None and not isinstance(master_, bool):
            raise Exception("Expected master_ to be a bool, received: {}".format(type(master_)))
        self.master = master_
        self.unknown_fields = unknown_fields
class IsMeteredResult(Type):
    # Auto-generated wrapper for the Juju API 'IsMeteredResult' wire type.
    _toSchema = {'metered': 'metered'}
    _toPy = {'metered': 'metered'}
    def __init__(self, metered=None, **unknown_fields):
        '''Boolean flag reporting whether the queried entity is metered.

        metered : bool
        '''
        metered_ = metered
        # Validate arguments against known Juju API types.
        if metered_ is not None and not isinstance(metered_, bool):
            raise Exception("Expected metered_ to be a bool, received: {}".format(type(metered_)))
        self.metered = metered_
        self.unknown_fields = unknown_fields
class JobsResult(Type):
    # Auto-generated wrapper for the Juju API 'JobsResult' wire type.
    _toSchema = {'error': 'error', 'jobs': 'jobs'}
    _toPy = {'error': 'error', 'jobs': 'jobs'}
    def __init__(self, error=None, jobs=None, **unknown_fields):
        '''List of job names for one entity, or the error that prevented it.

        error : Error
        jobs : typing.Sequence[str]
        '''
        error_ = Error.from_json(error) if error else None
        jobs_ = jobs
        # Validate arguments against known Juju API types.
        if error_ is not None and not isinstance(error_, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
        if jobs_ is not None and not isinstance(jobs_, (bytes, str, list)):
            raise Exception("Expected jobs_ to be a Sequence, received: {}".format(type(jobs_)))
        self.error = error_
        self.jobs = jobs_
        self.unknown_fields = unknown_fields
class JobsResults(Type):
    # Auto-generated wrapper for the Juju API 'JobsResults' wire type.
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''Batch of JobsResult entries from a bulk API call.

        results : typing.Sequence[~JobsResult]
        '''
        results_ = [JobsResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None and not isinstance(results_, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class KnownServiceArgs(Type):
    # Auto-generated wrapper for the Juju API 'KnownServiceArgs' wire type.
    # Note the wire key 'known-services' maps to the Python name 'known_services'.
    _toSchema = {'known_services': 'known-services'}
    _toPy = {'known-services': 'known_services'}
    def __init__(self, known_services=None, **unknown_fields):
        '''Names of services known to the controller.

        known_services : typing.Sequence[str]
        '''
        known_services_ = known_services
        # Validate arguments against known Juju API types.
        if known_services_ is not None and not isinstance(known_services_, (bytes, str, list)):
            raise Exception("Expected known_services_ to be a Sequence, received: {}".format(type(known_services_)))
        self.known_services = known_services_
        self.unknown_fields = unknown_fields
class KubernetesDeploymentInfo(Type):
    # Auto-generated wrapper for the Juju API 'KubernetesDeploymentInfo' wire type.
    _toSchema = {'deployment_type': 'deployment-type', 'service_type': 'service-type'}
    _toPy = {'deployment-type': 'deployment_type', 'service-type': 'service_type'}
    def __init__(self, deployment_type=None, service_type=None, **unknown_fields):
        '''Deployment and service type of a Kubernetes workload.

        deployment_type : str
        service_type : str
        '''
        deployment_type_ = deployment_type
        service_type_ = service_type
        # Validate arguments against known Juju API types.
        if deployment_type_ is not None and not isinstance(deployment_type_, (bytes, str)):
            raise Exception("Expected deployment_type_ to be a str, received: {}".format(type(deployment_type_)))
        if service_type_ is not None and not isinstance(service_type_, (bytes, str)):
            raise Exception("Expected service_type_ to be a str, received: {}".format(type(service_type_)))
        self.deployment_type = deployment_type_
        self.service_type = service_type_
        self.unknown_fields = unknown_fields
class KubernetesDeviceParams(Type):
    # Auto-generated wrapper for the Juju API 'KubernetesDeviceParams' wire type.
    # Wire keys are capitalized ('Attributes', 'Count', 'Type'); 'Type' maps to
    # 'type_' to avoid shadowing the builtin/base-class name.
    _toSchema = {'attributes': 'Attributes', 'count': 'Count', 'type_': 'Type'}
    _toPy = {'Attributes': 'attributes', 'Count': 'count', 'Type': 'type_'}
    def __init__(self, attributes=None, count=None, type_=None, **unknown_fields):
        '''Parameters of a device requested from Kubernetes.

        attributes : typing.Mapping[str, str]
        count : int
        type_ : str
        '''
        attributes_ = attributes
        count_ = count
        type__ = type_
        # Validate arguments against known Juju API types.
        if attributes_ is not None and not isinstance(attributes_, dict):
            raise Exception("Expected attributes_ to be a Mapping, received: {}".format(type(attributes_)))
        if count_ is not None and not isinstance(count_, int):
            raise Exception("Expected count_ to be a int, received: {}".format(type(count_)))
        if type__ is not None and not isinstance(type__, (bytes, str)):
            raise Exception("Expected type__ to be a str, received: {}".format(type(type__)))
        self.attributes = attributes_
        self.count = count_
        self.type_ = type__
        self.unknown_fields = unknown_fields
class KubernetesFilesystemAttachmentParams(Type):
    # Auto-generated wrapper for the Juju API 'KubernetesFilesystemAttachmentParams' wire type.
    _toSchema = {'mount_point': 'mount-point', 'provider': 'provider', 'read_only': 'read-only'}
    _toPy = {'mount-point': 'mount_point', 'provider': 'provider', 'read-only': 'read_only'}
    def __init__(self, mount_point=None, provider=None, read_only=None, **unknown_fields):
        '''Parameters for attaching a filesystem to a Kubernetes workload.

        mount_point : str
        provider : str
        read_only : bool
        '''
        mount_point_ = mount_point
        provider_ = provider
        read_only_ = read_only
        # Validate arguments against known Juju API types.
        if mount_point_ is not None and not isinstance(mount_point_, (bytes, str)):
            raise Exception("Expected mount_point_ to be a str, received: {}".format(type(mount_point_)))
        if provider_ is not None and not isinstance(provider_, (bytes, str)):
            raise Exception("Expected provider_ to be a str, received: {}".format(type(provider_)))
        if read_only_ is not None and not isinstance(read_only_, bool):
            raise Exception("Expected read_only_ to be a bool, received: {}".format(type(read_only_)))
        self.mount_point = mount_point_
        self.provider = provider_
        self.read_only = read_only_
        self.unknown_fields = unknown_fields
class KubernetesFilesystemInfo(Type):
_toSchema = {'data': 'data', 'filesystem_id': 'filesystem-id', 'info': 'info', 'mount_point': 'mount-point', 'pool': 'pool', 'read_only': 'read-only', 'size': 'size', 'status': 'status', 'storagename': 'storagename', 'volume': 'volume'}
_toPy = {'data': 'data', 'filesystem-id': 'filesystem_id', 'info': 'info', 'mount-point': 'mount_point', 'pool': 'pool', 'read-only': 'read_only', 'size': 'size', 'status': 'status', | |
clusterable, this should be roughly
# block diagonal
# take the absolute value of all the weights
weight_mat = np.abs(weight_mat)
# print(weight_mat)
# now for the actual calculation...
# divide the matrix into blocks depending on the cluster of the rows and
# columns. then add everything in that block to the appropriate volume and
# cut values.
cut_vals = np.zeros(num_clusters)
vol_vals = np.zeros(num_clusters)
count_rows = Counter(row_labels)
count_cols = Counter(col_labels)
row_offset = 0
for i in range(num_clusters):
num_rows_i = count_rows[i]
end_rows_i = row_offset + num_rows_i
col_offset = 0
for j in range(num_clusters):
num_cols_j = count_cols[j]
end_cols_j = col_offset + num_cols_j
sub_mat = weight_mat[row_offset:end_rows_i, col_offset:end_cols_j]
# print("row cluster", i)
# print("col cluster", j)
# print(sub_mat)
sum_sub_mat = np.sum(sub_mat)
vol_vals[i] += sum_sub_mat
vol_vals[j] += sum_sub_mat
if i != j:
cut_vals[i] += sum_sub_mat
cut_vals[j] += sum_sub_mat
col_offset += num_cols_j
row_offset += num_rows_i
return cut_vals, vol_vals
def compute_ncut(adj_mat, clustering_labels, epsilon, verbose=False):
    """Compute the n-cut objective of a clustering over a weighted graph.

    adj_mat: (n, n) adjacency matrix (dense array or scipy sparse).
    clustering_labels: length-n array of cluster ids; nodes labelled -1
        contribute no term of their own.
    epsilon: constant added to each cluster volume to avoid division by zero.
    verbose: if True, print the cut and volume of every cluster.

    Returns sum over clusters of cut(cluster) / (vol(cluster) + epsilon).
    """
    labelled = [lab for lab in clustering_labels if lab != -1]
    total = 0
    for cluster_id in np.unique(labelled):
        members = (clustering_labels == cluster_id)
        outsiders = (clustering_labels != cluster_id)
        # weight of edges leaving the cluster
        cut_weight = adj_mat[members, :][:, outsiders].sum()
        # volume = sum of the degrees of the cluster's nodes
        volume = adj_mat[members, :].sum()
        if verbose:
            print('ncut term', cluster_id, cut_weight, volume)
        total += cut_weight / (volume + epsilon)
    return total
def weights_array_to_cluster_quality(weights_array, adj_mat, num_clusters,
                                     eigen_solver, assign_labels, epsilon,
                                     is_testing=False):
    """Cluster the network graph and score the clustering by its n-cut.

    weights_array: per-layer weight matrices (used only for the testing
        cross-check).
    adj_mat: adjacency matrix of the network graph.
    num_clusters, eigen_solver, assign_labels: forwarded to cluster_net.
    epsilon: regulariser for the n-cut denominator.
    is_testing: if True, also compute the n-cut with the previous
        implementation and assert both agree.

    Returns (ncut_value, clustering_labels).
    """
    labels = cluster_net(num_clusters, adj_mat, eigen_solver, assign_labels)
    ncut_value = compute_ncut(adj_mat, labels, epsilon, verbose=is_testing)
    if is_testing:
        # cross-check the current n-cut against the older implementation
        reference = ncut(weights_array, num_clusters, labels, epsilon)
        print('NCUT Current', ncut_value)
        print('NCUT Previous', reference)
        assert math.isclose(ncut_value, reference, abs_tol=1e-5)
    return ncut_value, labels
def connected_comp_analysis(weights, adj_mat):
    """Summarise the connected-component structure of the network graph.

    weights: per-layer weight matrices (used to recover layer widths).
    adj_mat: sparse adjacency matrix of the whole network.

    Returns a dict with:
      'num_comps': number of connected components,
      'counts_dict': {component size: number of components of that size},
      'prop_each_layer': per layer, fraction of neurons that are
          single-neuron components.
    """
    widths = weights_to_layer_widths(weights)
    num_comps, cc_labels = sparse.csgraph.connected_components(adj_mat,
                                                               directed=False)
    component_sizes = Counter(cc_labels)
    # histogram of component sizes: size -> how many components have it
    counts_dict = {}
    # per-layer count of single-neuron components
    num_each_layer = np.zeros(len(widths))
    for comp in range(num_comps):
        size = component_sizes[comp]
        counts_dict[size] = counts_dict.get(size, 0) + 1
        if size == 1:
            # locate the lone neuron and credit its layer
            lone_neuron = np.where(cc_labels == comp)[0][0]
            layer_idx = mlp_int_to_tup(lone_neuron, widths)[0]
            num_each_layer[layer_idx] += 1
    return {'num_comps': num_comps,
            'counts_dict': counts_dict,
            'prop_each_layer': num_each_layer / widths}
def delete_isolated_ccs_refactored(weights, adjacency_matrix, is_testing=False):
    """Drop isolated nodes, assuming each isolated component has one node.

    weights: per-layer weight matrices (only sliced when is_testing).
    adjacency_matrix: sparse adjacency matrix of the network.
    is_testing: if True, also build the pruned per-layer weight matrices;
        otherwise the returned weights list is empty.

    Returns (pruned_weights, pruned_adjacency, node_mask) where node_mask
    is a 1-D boolean array marking the nodes that were kept.
    """
    # a node survives iff it has at least one incident edge
    node_mask = (adjacency_matrix != 0).toarray().any(axis=1)
    pruned_adjacency = adjacency_matrix[:, node_mask][node_mask, :]
    pruned_weights = []
    if is_testing:
        layer_sizes = [w.shape[0] for w in weights]
        # per-layer masks; consecutive pairs slice (rows, cols) of each
        # weight matrix, the trailing layer padded with Ellipsis (take all)
        row_masks, col_masks = it.tee(splitter(node_mask, layer_sizes), 2)
        next(col_masks)
        mask_pairs = it.zip_longest(row_masks, col_masks, fillvalue=Ellipsis)
        dense_weights = (w.toarray() if sparse.issparse(w) else w
                         for w in weights)
        pruned_weights = [np.array(w)[rmask, :][:, cmask]
                          for w, (rmask, cmask)
                          in zip(dense_weights, mask_pairs)]
    return pruned_weights, pruned_adjacency, node_mask
def delete_isolated_ccs(weight_array, adj_mat):
    """Remove connected components that do not span the whole network.

    A component is kept only if it has at least one neuron in the first
    layer AND at least one in the final layer; every other component is
    deleted from both the weight matrices and the adjacency matrix.

    weight_array: list of 2-D weight matrices, one per layer transition.
    adj_mat: sparse adjacency matrix of the whole network graph.

    Returns (new_weight_array, new_adj_mat); the inputs are returned
    unchanged when there is nothing to delete.
    """
    nc, labels = sparse.csgraph.connected_components(adj_mat, directed=False)
    # if there's only one connected component, don't bother
    if nc == 1:
        return weight_array, adj_mat
    widths = weights_to_layer_widths(weight_array)
    # component labels represented in the first layer
    # (the first widths[0] flat indices are exactly the first layer's neurons)
    initial_ccs = set()
    for i in range(widths[0]):
        initial_ccs.add(labels[i])
    # component labels represented in the final layer
    final_ccs = set()
    final_layer = len(widths) - 1
    for i in range(widths[-1]):
        # map (layer, index-within-layer) to a flat neuron index
        neuron = mlp_tup_to_int((final_layer, i), widths)
        final_ccs.add(labels[neuron])
    # a component is "isolated" unless it touches both the first and last layer
    isolated_ccs = set()
    for c in range(nc):
        if not (c in initial_ccs and c in final_ccs):
            isolated_ccs.add(c)
    # if there aren't any isolated ccs, don't bother deleting them!
    if not isolated_ccs:
        return weight_array, adj_mat
    # delete from each weight matrix the rows/columns belonging to neurons
    # that live in an isolated component
    new_weight_array = []
    for (t, mat) in enumerate(weight_array):
        n_rows, n_cols = mat.shape
        # weight matrix t maps layer t (rows) to layer t+1 (columns)
        rows_layer = t
        cols_layer = t + 1
        rows_to_delete = []
        for i in range(n_rows):
            neuron = mlp_tup_to_int((rows_layer, i), widths)
            if labels[neuron] in isolated_ccs:
                rows_to_delete.append(i)
        cols_to_delete = []
        for j in range(n_cols):
            neuron = mlp_tup_to_int((cols_layer, j), widths)
            if labels[neuron] in isolated_ccs:
                cols_to_delete.append(j)
        rows_deleted = np.delete(mat, rows_to_delete, 0)
        new_mat = np.delete(rows_deleted, cols_to_delete, 1)
        new_weight_array.append(new_mat)
    # rebuild the adjacency matrix from the pruned weights
    new_adj_mat = weights_to_graph(new_weight_array)
    return new_weight_array, new_adj_mat
<EMAIL>
def shuffle_and_cluster(num_samples,
                        weights_path,
                        network_type, num_clusters,
                        shuffle_smaller_model, eigen_solver, delete_isolated_ccs_bool,
                        assign_labels, epsilon, shuffle_method):
    """Build a null distribution of n-cut values from shuffled weights.

    Each of `num_samples` iterations shuffles the saved weights with the
    strategy named by `shuffle_method`, rebuilds the network graph,
    optionally prunes isolated connected components, and records the
    n-cut of a spectral clustering of the result.

    num_samples: how many shuffled networks to evaluate.
    weights_path: path passed to load_weights to obtain the saved weights.
    network_type: 'mlp' or 'cnn' (anything else raises ValueError).
    num_clusters: number of clusters for the spectral clustering.
    shuffle_smaller_model: for MLPs, shuffle the pruned network rather
        than the full loaded weights.
    eigen_solver, assign_labels: forwarded to the clustering routine.
    delete_isolated_ccs_bool: prune isolated connected components.
    epsilon: regulariser added to each cluster volume in the n-cut.
    shuffle_method: one of SHUFFLE_METHODS.

    Returns a 1-D numpy array of `num_samples` shuffled n-cut values.
    """
    loaded_weights = load_weights(weights_path)
    if network_type == 'mlp':
        weights_ = loaded_weights
        adj_mat_ = weights_to_graph(loaded_weights)
    elif network_type == 'cnn':
        # flatten conv tensors into per-layer weight matrices plus a graph
        weights_, adj_mat_ = cnn_tensors_to_flat_weights_and_graph(loaded_weights)
    else:
        raise ValueError("network_type must be 'mlp' or 'cnn'")
    if shuffle_smaller_model and delete_isolated_ccs_bool:
        # delete unconnected components from the net BEFORE SHUFFLING!!!
        # is_testing=True so the pruned weight matrices are actually returned
        # (the non-testing path of delete_isolated_ccs_refactored returns an
        # empty weights list)
        weights, adj_mat, _ = delete_isolated_ccs_refactored(weights_, adj_mat_,
                                                             is_testing=True)
    else:
        weights, adj_mat = weights_, adj_mat_
    shuff_ncuts = []
    assert shuffle_method in SHUFFLE_METHODS
    # select the per-layer shuffling strategy
    if shuffle_method == 'layer':
        shuffle_function = shuffle_weights
    elif shuffle_method == 'layer_nonzero':
        shuffle_function = shuffle_weights_nonzero
    elif shuffle_method == 'layer_nonzero_distribution':
        shuffle_function = shuffle_weights_nonzero_distribution
    elif shuffle_method == 'layer_all_distribution':
        shuffle_function = shuffle_weights_layer_all_distribution
    for _ in range(num_samples):
        if network_type == 'mlp':
            if shuffle_smaller_model:
                shuff_weights_ = list(map(shuffle_function, weights))
            else:
                shuff_weights_ = list(map(shuffle_function, loaded_weights))
            shuff_adj_mat_ = weights_to_graph(shuff_weights_)
        else:
            shuff_tensors = list(map(shuffle_function, loaded_weights))
            shuff_weights_, shuff_adj_mat_ = cnn_tensors_to_flat_weights_and_graph(shuff_tensors)
            # NB: this is not quite right, because you're shuffling the whole
            # network, meaning that the isolated ccs get shuffled back in
        if delete_isolated_ccs_bool:
            shuff_weights, shuff_adj_mat, _ = delete_isolated_ccs_refactored(shuff_weights_,
                                                                             shuff_adj_mat_)
        else:
            shuff_weights, shuff_adj_mat = shuff_weights_, shuff_adj_mat_
        shuff_ncut, _ = weights_array_to_cluster_quality(shuff_weights,
                                                         shuff_adj_mat,
                                                         num_clusters,
                                                         eigen_solver,
                                                         assign_labels, epsilon)
        shuff_ncuts.append(shuff_ncut)
    return np.array(shuff_ncuts)
<EMAIL>
def run_clustering(weights_path, num_clusters, eigen_solver, assign_labels,
epsilon, num_samples, delete_isolated_ccs_bool, network_type,
shuffle_smaller_model,
with_labels, with_shuffle, shuffle_method, n_workers,
is_testing, with_shuffled_ncuts):
# t0 = time.time()
# load weights and get adjacency matrix
if is_testing:
assert network_type == 'cnn'
loaded_weights = load_weights(weights_path)
if network_type == 'mlp':
weights_ = loaded_weights
adj_mat_ = weights_to_graph(loaded_weights)
elif network_type == 'cnn':
# comparing current and previous version of expanding CNN
if is_testing:
tester_cnn_tensors_to_flat_weights_and_graph(loaded_weights)
weights_, adj_mat_ = cnn_tensors_to_flat_weights_and_graph(loaded_weights)
else:
raise ValueError("network_type must be 'mlp' or 'cnn'")
# t1 = time.time()
# print('time to form adjacency matrix', t1 - t0)
# analyse connectivity structure of network
# cc_dict = connected_comp_analysis(weights_, adj_mat_)
# print("connectivity analysis:", cc_dict)
if delete_isolated_ccs_bool:
# delete unconnected components from the net
weights, adj_mat, node_mask = delete_isolated_ccs_refactored(weights_, adj_mat_,
is_testing=is_testing)
if is_testing:
weights_old, adj_mat_old = delete_isolated_ccs(weights_, adj_mat_)
assert (adj_mat != adj_mat_old).sum() == 0
assert all((w1 == w2).all() for w1, w2 in zip(weights, weights_old))
else:
weights, adj_mat = weights_, adj_mat_
node_mask = numpy.full(adj_mat.shape[0], True)
# t2 = time.time()
# print("time to delete isolated ccs", t2 - t1)
# find cluster quality of this pruned net
print("\nclustering unshuffled weights\n")
unshuffled_ncut, clustering_labels = weights_array_to_cluster_quality(weights, adj_mat,
num_clusters,
eigen_solver,
assign_labels, epsilon,
is_testing)
ave_in_out = (1 - unshuffled_ncut / num_clusters) / (2 * unshuffled_ncut
/ num_clusters)
# t3 = time.time()
# print("time to cluster unshuffled weights", t3 - t2)
result = {'ncut': unshuffled_ncut,
'ave_in_out': ave_in_out,
'node_mask': node_mask}
#return clustering_labels, adj_mat, result
if with_shuffle:
# find cluster quality of other ways of rearranging the net
print("\nclustering shuffled weights\n")
n_samples_per_worker = num_samples // n_workers
| |
'billing_month') and self.billing_month is not None:
_dict['billing_month'] = self.billing_month
if hasattr(self, 'billing_country_code') and self.billing_country_code is not None:
_dict['billing_country_code'] = self.billing_country_code
if hasattr(self, 'billing_currency_code') and self.billing_currency_code is not None:
_dict['billing_currency_code'] = self.billing_currency_code
if hasattr(self, 'resources') and self.resources is not None:
_dict['resources'] = self.resources.to_dict()
if hasattr(self, 'offers') and self.offers is not None:
_dict['offers'] = [x.to_dict() for x in self.offers]
if hasattr(self, 'support') and self.support is not None:
_dict['support'] = [x.to_dict() for x in self.support]
if hasattr(self, 'subscription') and self.subscription is not None:
_dict['subscription'] = self.subscription.to_dict()
return _dict
    def _to_dict(self):
        """Return a json dictionary representing this model (alias of to_dict)."""
        return self.to_dict()
    def __str__(self) -> str:
        """Return this AccountSummary as pretty-printed (2-space indented) JSON."""
        return json.dumps(self.to_dict(), indent=2)
    def __eq__(self, other: 'AccountSummary') -> bool:
        """Return True when `other` is an AccountSummary with equal attributes.

        Comparison is attribute-wise via __dict__; instances of any other
        class are never equal.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other: 'AccountSummary') -> bool:
        """Return True when self and other are not equal (negation of __eq__)."""
        return not self == other
class AccountUsage():
    """
    The aggregated usage and charges for all the plans in the account.

    :attr str account_id: The ID of the account.
    :attr str pricing_country: The target country pricing that should be used.
    :attr str currency_code: The currency for the cost fields in the resources,
          plans and metrics.
    :attr str month: The month.
    :attr List[Resource] resources: All the resource used in the account.
    """

    # every property of this model is required
    _required_scalar_props = ('account_id', 'pricing_country',
                              'currency_code', 'month')

    def __init__(self,
                 account_id: str,
                 pricing_country: str,
                 currency_code: str,
                 month: str,
                 resources: List['Resource']) -> None:
        """
        Initialize a AccountUsage object.

        :param str account_id: The ID of the account.
        :param str pricing_country: The target country pricing that should be used.
        :param str currency_code: The currency for the cost fields in the
               resources, plans and metrics.
        :param str month: The month.
        :param List[Resource] resources: All the resource used in the account.
        """
        self.account_id = account_id
        self.pricing_country = pricing_country
        self.currency_code = currency_code
        self.month = month
        self.resources = resources

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'AccountUsage':
        """Initialize a AccountUsage object from a json dictionary.

        :raises ValueError: when any required property is missing.
        """
        args = {}
        for prop in cls._required_scalar_props:
            if prop not in _dict:
                raise ValueError('Required property \'{}\' not present in AccountUsage JSON'.format(prop))
            args[prop] = _dict.get(prop)
        if 'resources' in _dict:
            args['resources'] = [Resource.from_dict(x) for x in _dict.get('resources')]
        else:
            raise ValueError('Required property \'resources\' not present in AccountUsage JSON')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a AccountUsage object from a json dictionary (alias of from_dict)."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        for prop in self._required_scalar_props:
            value = getattr(self, prop, None)
            if value is not None:
                _dict[prop] = value
        if hasattr(self, 'resources') and self.resources is not None:
            _dict['resources'] = [x.to_dict() for x in self.resources]
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model (alias of to_dict)."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return this AccountUsage as pretty-printed (2-space indented) JSON."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'AccountUsage') -> bool:
        """Return True when `other` is an AccountUsage with equal attributes."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'AccountUsage') -> bool:
        """Return True when self and other are not equal (negation of __eq__)."""
        return not self == other
class Discount():
    """
    Information about a discount that is associated with a metric.

    :attr str ref: The reference ID of the discount.
    :attr str name: (optional) The name of the discount indicating category.
    :attr str display_name: (optional) The name of the discount.
    :attr float discount: The discount percentage.
    """

    def __init__(self,
                 ref: str,
                 discount: float,
                 *,
                 name: str = None,
                 display_name: str = None) -> None:
        """
        Initialize a Discount object.

        :param str ref: The reference ID of the discount.
        :param float discount: The discount percentage.
        :param str name: (optional) The name of the discount indicating category.
        :param str display_name: (optional) The name of the discount.
        """
        self.ref = ref
        self.name = name
        self.display_name = display_name
        self.discount = discount

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'Discount':
        """Initialize a Discount object from a json dictionary.

        :raises ValueError: when 'ref' or 'discount' is missing.
        """
        args = {}
        # 'ref' and 'discount' are required; 'name' and 'display_name' are not
        for required in ('ref', 'discount'):
            if required not in _dict:
                raise ValueError('Required property \'{}\' not present in Discount JSON'.format(required))
            args[required] = _dict.get(required)
        for optional in ('name', 'display_name'):
            if optional in _dict:
                args[optional] = _dict.get(optional)
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a Discount object from a json dictionary (alias of from_dict)."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        for prop in ('ref', 'name', 'display_name', 'discount'):
            value = getattr(self, prop, None)
            if value is not None:
                _dict[prop] = value
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model (alias of to_dict)."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return this Discount as pretty-printed (2-space indented) JSON."""
        return json.dumps(self.to_dict(), indent=2)

    def __eq__(self, other: 'Discount') -> bool:
        """Return True when `other` is a Discount with equal attributes."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other: 'Discount') -> bool:
        """Return True when self and other are not equal (negation of __eq__)."""
        return not self == other
class InstanceUsage():
"""
The aggregated usage and charges for an instance.
:attr str account_id: The ID of the account.
:attr str resource_instance_id: The ID of the resource instance.
:attr str resource_instance_name: (optional) The name of the resource instance.
:attr str resource_id: The ID of the resource.
:attr str resource_name: (optional) The name of the resource.
:attr str resource_group_id: (optional) The ID of the resource group.
:attr str resource_group_name: (optional) The name of the resource group.
:attr str organization_id: (optional) The ID of the organization.
:attr str organization_name: (optional) The name of the organization.
:attr str space_id: (optional) The ID of the space.
:attr str space_name: (optional) The name of the space.
:attr str consumer_id: (optional) The ID of the consumer.
:attr str region: (optional) The region where instance was provisioned.
:attr str pricing_region: (optional) The pricing region where the usage that was
submitted was rated.
:attr str pricing_country: The target country pricing that should be used.
:attr str currency_code: The currency for the cost fields in the resources,
plans and metrics.
:attr bool billable: Is the cost charged to the account.
:attr str plan_id: The ID of the plan where the instance was provisioned and
rated.
:attr str plan_name: (optional) The name of the plan where the instance was
provisioned and rated.
:attr str month: The month.
:attr List[Metric] usage: All the resource used in the account.
"""
def __init__(self,
account_id: str,
resource_instance_id: str,
resource_id: str,
pricing_country: str,
currency_code: str,
billable: bool,
plan_id: str,
month: str,
usage: List['Metric'],
*,
resource_instance_name: str = None,
resource_name: str = None,
resource_group_id: str = None,
resource_group_name: str = None,
organization_id: str = None,
organization_name: str = None,
space_id: str = None,
space_name: str = None,
consumer_id: str = None,
region: str = None,
pricing_region: str = None,
plan_name: str = None) -> None:
"""
Initialize a InstanceUsage object.
:param str account_id: The ID of the account.
:param str resource_instance_id: The ID of the resource instance.
:param str resource_id: The ID of the resource.
:param str pricing_country: The target country pricing that should be used.
:param str currency_code: The currency for the cost fields in the
resources, plans and metrics.
:param bool billable: Is the cost charged to the account.
:param str plan_id: | |
'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'str'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
'user_whitelisted_ip_ranges': {'key': 'properties.userWhitelistedIpRanges', 'type': '[str]'},
}
def __init__(
self,
*,
kind: Optional[str] = None,
name_properties_name: Optional[str] = None,
location: Optional[str] = None,
vnet_name: Optional[str] = None,
vnet_resource_group_name: Optional[str] = None,
vnet_subnet_name: Optional[str] = None,
virtual_network: Optional["VirtualNetworkProfile"] = None,
internal_load_balancing_mode: Optional[Union[str, "InternalLoadBalancingMode"]] = None,
multi_size: Optional[str] = None,
multi_role_count: Optional[int] = None,
worker_pools: Optional[List["WorkerPool"]] = None,
ipssl_address_count: Optional[int] = None,
dns_suffix: Optional[str] = None,
network_access_control_list: Optional[List["NetworkAccessControlEntry"]] = None,
front_end_scale_factor: Optional[int] = None,
api_management_account_id: Optional[str] = None,
suspended: Optional[bool] = None,
dynamic_cache_enabled: Optional[bool] = None,
cluster_settings: Optional[List["NameValuePair"]] = None,
user_whitelisted_ip_ranges: Optional[List[str]] = None,
**kwargs
):
"""
:keyword kind: Kind of resource.
:paramtype kind: str
:keyword name_properties_name: Name of the App Service Environment.
:paramtype name_properties_name: str
:keyword location: Location of the App Service Environment, e.g. "West US".
:paramtype location: str
:keyword vnet_name: Name of the Virtual Network for the App Service Environment.
:paramtype vnet_name: str
:keyword vnet_resource_group_name: Resource group of the Virtual Network.
:paramtype vnet_resource_group_name: str
:keyword vnet_subnet_name: Subnet of the Virtual Network.
:paramtype vnet_subnet_name: str
:keyword virtual_network: Description of the Virtual Network.
:paramtype virtual_network: ~azure.mgmt.web.v2016_09_01.models.VirtualNetworkProfile
:keyword internal_load_balancing_mode: Specifies which endpoints to serve internally in the
Virtual Network for the App Service Environment. Possible values include: "None", "Web",
"Publishing".
:paramtype internal_load_balancing_mode: str or
~azure.mgmt.web.v2016_09_01.models.InternalLoadBalancingMode
:keyword multi_size: Front-end VM size, e.g. "Medium", "Large".
:paramtype multi_size: str
:keyword multi_role_count: Number of front-end instances.
:paramtype multi_role_count: int
:keyword worker_pools: Description of worker pools with worker size IDs, VM sizes, and number
of workers in each pool.
:paramtype worker_pools: list[~azure.mgmt.web.v2016_09_01.models.WorkerPool]
:keyword ipssl_address_count: Number of IP SSL addresses reserved for the App Service
Environment.
:paramtype ipssl_address_count: int
:keyword dns_suffix: DNS suffix of the App Service Environment.
:paramtype dns_suffix: str
:keyword network_access_control_list: Access control list for controlling traffic to the App
Service Environment.
:paramtype network_access_control_list:
list[~azure.mgmt.web.v2016_09_01.models.NetworkAccessControlEntry]
:keyword front_end_scale_factor: Scale factor for front-ends.
:paramtype front_end_scale_factor: int
:keyword api_management_account_id: API Management Account associated with the App Service
Environment.
:paramtype api_management_account_id: str
:keyword suspended: :code:`<code>true</code>` if the App Service Environment is suspended;
otherwise, :code:`<code>false</code>`. The environment can be suspended, e.g. when the
management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:paramtype suspended: bool
:keyword dynamic_cache_enabled: True/false indicating whether the App Service Environment is
suspended. The environment can be suspended e.g. when the management endpoint is no longer
available
(most likely because NSG blocked the incoming traffic).
:paramtype dynamic_cache_enabled: bool
:keyword cluster_settings: Custom settings for changing the behavior of the App Service
Environment.
:paramtype cluster_settings: list[~azure.mgmt.web.v2016_09_01.models.NameValuePair]
:keyword user_whitelisted_ip_ranges: User added ip ranges to whitelist on ASE db.
:paramtype user_whitelisted_ip_ranges: list[str]
"""
super(AppServiceEnvironmentPatchResource, self).__init__(kind=kind, **kwargs)
self.name_properties_name = name_properties_name
self.location = location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
self.user_whitelisted_ip_ranges = user_whitelisted_ip_ranges
class Resource(msrest.serialization.Model):
    """Azure resource. This resource is tracked in Azure Resource Manager.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource Name.
    :vartype name: str
    :ivar kind: Kind of resource.
    :vartype kind: str
    :ivar location: Required. Resource Location.
    :vartype location: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar tags: A set of tags. Resource tags.
    :vartype tags: dict[str, str]
    """

    # id, name and type are server-assigned; location must be provided.
    _validation = {
        "id": {"readonly": True},
        "name": {"readonly": True},
        "location": {"required": True},
        "type": {"readonly": True},
    }

    _attribute_map = {
        "id": {"key": "id", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "kind": {"key": "kind", "type": "str"},
        "location": {"key": "location", "type": "str"},
        "type": {"key": "type", "type": "str"},
        "tags": {"key": "tags", "type": "{str}"},
    }

    def __init__(
        self,
        *,
        location: str,
        kind: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ):
        """
        :keyword location: Required. Resource Location.
        :paramtype location: str
        :keyword kind: Kind of resource.
        :paramtype kind: str
        :keyword tags: A set of tags. Resource tags.
        :paramtype tags: dict[str, str]
        """
        super().__init__(**kwargs)
        # read-only, populated by the server
        self.id = None
        self.name = None
        self.type = None
        # supplied by the caller
        self.kind = kind
        self.location = location
        self.tags = tags
class AppServiceEnvironmentResource(Resource):
"""App Service Environment ARM resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:ivar kind: Kind of resource.
:vartype kind: str
:ivar location: Required. Resource Location.
:vartype location: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar name_properties_name: Name of the App Service Environment.
:vartype name_properties_name: str
:ivar location_properties_location: Location of the App Service Environment, e.g. "West US".
:vartype location_properties_location: str
:ivar provisioning_state: Provisioning state of the App Service Environment. Possible values
include: "Succeeded", "Failed", "Canceled", "InProgress", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.web.v2016_09_01.models.ProvisioningState
:ivar status: Current status of the App Service Environment. Possible values include:
"Preparing", "Ready", "Scaling", "Deleting".
:vartype status: str or ~azure.mgmt.web.v2016_09_01.models.HostingEnvironmentStatus
:ivar vnet_name: Name of the Virtual Network for the App Service Environment.
:vartype vnet_name: str
:ivar vnet_resource_group_name: Resource group of the Virtual Network.
:vartype vnet_resource_group_name: str
:ivar vnet_subnet_name: Subnet of the Virtual Network.
:vartype vnet_subnet_name: str
:ivar virtual_network: Description of the Virtual Network.
:vartype virtual_network: ~azure.mgmt.web.v2016_09_01.models.VirtualNetworkProfile
:ivar internal_load_balancing_mode: Specifies which endpoints to serve internally in the
Virtual Network for the App Service Environment. Possible values include: "None", "Web",
"Publishing".
:vartype internal_load_balancing_mode: str or
~azure.mgmt.web.v2016_09_01.models.InternalLoadBalancingMode
:ivar multi_size: Front-end VM size, e.g. "Medium", "Large".
:vartype multi_size: str
:ivar multi_role_count: Number of front-end instances.
:vartype multi_role_count: int
:ivar worker_pools: Description of worker pools with worker size IDs, VM sizes, and number of
workers in each pool.
:vartype worker_pools: list[~azure.mgmt.web.v2016_09_01.models.WorkerPool]
:ivar ipssl_address_count: Number of IP SSL addresses reserved for the App Service Environment.
:vartype ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App Service Environment, e.g.
"Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata database for the App
Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:ivar dns_suffix: DNS suffix of the App Service Environment.
:vartype dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing which VM sizes are
allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing which VM sizes are
allowed | |
Index of parent node for this child node.
LBD_prev : float
Previous iteration value of LBD.
LBD : float
Current value of lower bound estimate.
UBD : float
Current value of upper bound esimate.
fopt : float
Current best objective value
xopt : ndarray
Current best design values.
node_num : int
Index of this current node
nodeHist : <NodeHist>
Data structure containing information about this node.
ubd_count : int
Counter for number of generations.
Returns
-------
float
New upper bound estimate.
float
New best objective value.
ndaray
New design variables.
list
List of parameters for new node.
"""
if OPTIMIZER == 'SNOPT':
options = {'Major optimality tolerance': 1.0e-5}
elif OPTIMIZER == 'SLSQP':
options = {'ACC': 1.0e-5}
elif OPTIMIZER == 'CONMIN':
options = {'DABFUN': 1.0e-5}
active_tol = self.options['active_tol']
local_search = self.options['local_search']
disp = self.options['disp']
trace_iter = self.options['trace_iter']
trace_iter_max = self.options['trace_iter_max']
obj_surrogate = self.obj_surrogate
num_des = len(self.xI_lb)
new_nodes = []
# Keep this to 0.49 to always round towards bottom-left
xloc_iter = np.round(xL_iter + 0.49 * (xU_iter - xL_iter))
floc_iter = self.objective_callback(xloc_iter)
# Genetic Algorithm
if local_search == 0:
# --------------------------------------------------------------
# Step 2: Obtain a local solution using a GA.
# --------------------------------------------------------------
ga = GeneticAlgorithm(self.obj_for_GA)
bits = np.ceil(np.log2(xU_iter - xL_iter + 1)).astype(int)
bits[bits <= 0] = 1
vub_vir = (2**bits - 1) + xL_iter
# More important nodes get a higher population size and number of generations.
if nodeHist.priority_flag == 1:
max_gen = 300
mfac = 6
else:
max_gen = 200
mfac = 4
L = np.sum(bits)
pop_size = mfac * L
t0 = time()
self.xU_iter = xU_iter
xloc_iter_new, floc_iter_new, nfit = \
ga.execute_ga(xL_iter, xL_iter, vub_vir, vub_vir, bits, pop_size, max_gen,
self._randomstate)
t_GA = time() - t0
if floc_iter_new < floc_iter:
floc_iter = floc_iter_new
xloc_iter = xloc_iter_new
# LHS Sampling or SNOPT
else:
# TODO Future research on sampling here
num_samples = np.round(np.max([10, np.min([50, num_des / nodeHist.priority_flag])]))
init_sam_node = lhs(num_des, samples=num_samples, criterion='center',
random_state=self._randomstate)
t_GA = 0.
for ii in range(int(num_samples)):
xloc_iter_new = np.round(xL_iter + init_sam_node[ii] * (xU_iter - xL_iter))
floc_iter_new = self.objective_callback(xloc_iter_new)
# SNOPT
if local_search == 2:
# TODO: did we lose a tol check here?
# active_tol: #Perform at non-flat starting point
if np.abs(floc_iter_new) > -np.inf:
# --------------------------------------------------------------
# Step 2: Obtain a local solution
# --------------------------------------------------------------
# Using a gradient-based method here.
# TODO: Make it more pluggable.
def _objcall(dv_dict):
"""
Compute objective for SNOPT.
"""
fail = 0
x = dv_dict['x']
# Objective
func_dict = {}
func_dict['obj'] = self.objective_callback(x)[0]
return func_dict, fail
xC_iter = xloc_iter_new
opt_x, opt_f, succ_flag, msg = snopt_opt(_objcall, xC_iter, xL_iter,
xU_iter, title='LocalSearch',
options=options)
xloc_iter_new = np.round(np.asarray(opt_x).flatten())
floc_iter_new = self.objective_callback(xloc_iter_new)
if floc_iter_new < floc_iter:
floc_iter = floc_iter_new
xloc_iter = xloc_iter_new
# Do some prechecks before commencing for partitioning.
ubdloc_best = nodeHist.ubdloc_best
if nodeHist.ubdloc_best > floc_iter + 1.0e-6:
ubd_track = np.concatenate((nodeHist.ubd_track, np.array([0])), axis=0)
ubdloc_best = floc_iter
else:
ubd_track = np.concatenate((nodeHist.ubd_track, np.array([1])), axis=0)
# diff_LBD = abs(LBD_prev - LBD_NegConEI)
if len(ubd_track) >= trace_iter_max or \
(len(ubd_track) >= trace_iter and np.sum(ubd_track[-trace_iter:]) == 0):
# TODO : Did we lose ths? -> #and UBD<=-1.0e-3:
child_info = np.array([[par_node, np.inf, floc_iter], [par_node, np.inf, floc_iter]])
# Fathomed due to no change in UBD_loc for 'trace_iter' generations
dis_flag = ['Y', 'Y']
else:
# --------------------------------------------------------------------------
# Step 3: Partition the current rectangle as per the new branching scheme.
# --------------------------------------------------------------------------
child_info = np.zeros([2, 3])
dis_flag = [' ', ' ']
# Choose
l_iter = (xU_iter - xL_iter).argmax()
if xloc_iter[l_iter] < xU_iter[l_iter]:
delta = 0.5 # 0<delta<1
else:
delta = -0.5 # -1<delta<0
for ii in range(2):
lb = xL_iter.copy()
ub = xU_iter.copy()
if ii == 0:
ub[l_iter] = np.floor(xloc_iter[l_iter] + delta)
elif ii == 1:
lb[l_iter] = np.ceil(xloc_iter[l_iter] + delta)
if np.linalg.norm(ub - lb) > active_tol: # Not a point
# --------------------------------------------------------------
# Step 4: Obtain an LBD of f in the newly created node
# --------------------------------------------------------------
S4_fail = False
x_comL, x_comU, Ain_hat, bin_hat = gen_coeff_bound(lb, ub, obj_surrogate)
sU, eflag_sU = self.maximize_S(x_comL, x_comU, Ain_hat, bin_hat)
if eflag_sU:
yL, eflag_yL = self.minimize_y(x_comL, x_comU, Ain_hat, bin_hat)
if eflag_yL:
NegEI = calc_conEI_norm([], obj_surrogate, SSqr=sU, y_hat=yL)
else:
S4_fail = True
else:
S4_fail = True
# Convex approximation failed!
if S4_fail:
LBD_NegConEI = LBD_prev
dis_flag[ii] = 'F'
else:
LBD_NegConEI = max(NegEI, LBD_prev)
# --------------------------------------------------------------
# Step 5: Store any new node inside the active set that has LBD
# lower than the UBD.
# --------------------------------------------------------------
priority_flag = 0
if LBD_NegConEI < np.inf and LBD_prev > -np.inf:
if np.abs((LBD_prev - LBD_NegConEI) / LBD_prev) < 0.005:
priority_flag = 1
nodeHist_new = NodeHist()
nodeHist_new.ubd_track = ubd_track
nodeHist_new.ubdloc_best = ubdloc_best
nodeHist_new.priority_flag = priority_flag
if LBD_NegConEI < UBD - 1.0e-6:
node_num += 1
new_node = [node_num, lb, ub, LBD_NegConEI, floc_iter, nodeHist_new]
new_nodes.append(new_node)
child_info[ii] = np.array([node_num, LBD_NegConEI, floc_iter])
else:
child_info[ii] = np.array([par_node, LBD_NegConEI, floc_iter])
# Flag for child created but not added to active set. (fathomed)
dis_flag[ii] = 'X'
else:
if ii == 1:
xloc_iter = ub
floc_iter = self.objective_callback(xloc_iter)
child_info[ii] = np.array([par_node, np.inf, floc_iter])
# Flag for No child created
dis_flag[ii] = 'x'
# Update the active set whenever better solution found
if floc_iter < UBD:
UBD = floc_iter
fopt = floc_iter
xopt = xloc_iter.reshape(num_des)
if disp:
if (self.iter_count - 1) % 25 == 0:
# Display output in a tabular format
print("=" * 95)
print("%19s%12s%14s%21s" % ("Global", "Parent", "Child1", "Child2"))
template = "%s%8s%10s%8s%9s%11s%10s%11s%11s%11s"
print(template % ("Iter", "LBD", "UBD", "Node", "Node1", "LBD1",
"Node2", "LBD2", "Flocal", "GA time"))
print("=" * 95)
template = "%3d%10.2f%10.2f%6d%8d%1s%13.2f%8d%1s%13.2f%9.2f%9.2f"
print(template % (self.iter_count, LBD, UBD, par_node, child_info[0, 0],
dis_flag[0], child_info[0, 1], child_info[1, 0],
dis_flag[1], child_info[1, 1], child_info[1, 2], t_GA))
return UBD, fopt, xopt, new_nodes
def objective_callback(self, xI):
"""
Evalute main problem objective at the requested point.
Objective is the expected improvement function with modifications to make it concave.
Parameters
----------
xI : ndarray
Value of design variables.
Returns
-------
float
Objective value
"""
obj_surrogate = self.obj_surrogate
# Normalized as per the convention in openmdao_Alpha:Kriging.
xval = (xI - obj_surrogate.X_mean) / obj_surrogate.X_std
NegEI = calc_conEI_norm(xval, obj_surrogate)
# print(xI, f)
return NegEI
def maximize_S(self, x_comL, x_comU, Ain_hat, bin_hat):
"""
Maximize the SigmaSqr Error.
This method finds an upper bound to the SigmaSqr Error, and scales up 'r' to provide a
smooth design space for gradient-based approach.
Parameters
----------
x_comL : ndarray
Full lower bounds vector
x_comU : ndarray
Full upper bounds vector.
Ain_hat : ndarray
Matrix Ain_hat for linear model of constraints.
bin_hat : ndarray
Vector bin_hat for linear model of constraints.
Returns
-------
float
Maximized upper bound for sigma squared error.
bool
Success flag True if successful.
"""
if OPTIMIZER == 'SNOPT':
options = {'Major optimality tolerance': 1.0e-5}
elif OPTIMIZER == 'SLSQP':
options = {'ACC': 1.0e-5}
elif OPTIMIZER == 'CONMIN':
options = {'DABFUN': 1.0e-5}
surrogate = self.obj_surrogate
R_inv = surrogate.R_inv
SigmaSqr = surrogate.SigmaSqr
X = surrogate.X
n, k = X.shape
one = np.ones([n, 1])
xhat_comL = x_comL.copy()
xhat_comU = x_comU.copy()
xhat_comL[k:] = 0.0
xhat_comU[k:] = 1.0
# Calculate the convexity factor alpha
rL = x_comL[k:]
rU = x_comU[k:]
dr_drhat = np.diag(rU[:, 0] - rL[:, 0])
T2_num = np.dot(np.dot(R_inv, one), np.dot(R_inv, one).T)
T2_den = np.dot(one.T, np.dot(R_inv, one))
d2S_dr2 = 2.0 * SigmaSqr * (R_inv - (T2_num / T2_den))
H_hat = np.dot(np.dot(dr_drhat, d2S_dr2), dr_drhat)
# Use Gershgorin's circle theorem to find a lower bound of the
# min eigen value of the hessian
eig_lb = np.zeros([n, 1])
for ii in range(n):
dia_ele = H_hat[ii, ii]
sum_rw = 0.0
sum_col = 0.0
for jj in range(n):
if ii != jj:
sum_rw += np.abs(H_hat[ii, jj])
sum_col += np.abs(H_hat[jj, ii])
eig_lb[ii] = dia_ele - np.min(np.array([sum_rw, sum_col]))
eig_min = np.min(eig_lb)
alpha = np.max(np.array([0.0, -0.5 * eig_min]))
# Maximize S
x0 = 0.5 * (xhat_comL + xhat_comU)
# Just storing stuff here to pull it out in the callback.
surrogate._alpha = alpha
self.x_comL = x_comL
self.x_comU = x_comU
self.xhat_comL = xhat_comL
self.xhat_comU = xhat_comU
self.Ain_hat = Ain_hat
self.bin_hat = bin_hat
opt_x, opt_f, succ_flag, msg = snopt_opt(self.calc_SSqr_convex, x0, xhat_comL,
xhat_comU, ncon=len(bin_hat),
| |
binvals = copy.deepcopy(binvals)
if not isinstance(binvals, (list, tuple)):
binvals = [binvals]
if not isinstance(zeropad, (list, tuple, np.ndarray)):
zeropad = [zeropad]
if isinstance(axis, (list, tuple, np.ndarray)) and not isinstance(zeropad, (list, tuple, np.ndarray)):
raise ValueError("If axis is an iterable, so must be zeropad.")
if len(axis) != len(zeropad):
raise ValueError("len(axis) must equal len(zeropad)")
for i, ax in enumerate(axis):
if zeropad[i] > 0:
if undo:
s = [slice(None) for j in range(data.ndim)]
s[ax] = slice(zeropad[i], -zeropad[i])
s = tuple(s)
data = data[s]
if binvals[i] is not None:
binvals[i] = binvals[i][s[i]]
else:
zshape = list(data.shape)
zshape[ax] = zeropad[i]
if bool_dtype:
z = np.ones(zshape, np.bool)
else:
z = np.zeros(zshape, data.dtype)
data = np.concatenate([z, data, z], axis=ax)
if binvals[i] is not None:
dx = np.median(np.diff(binvals[i]))
Nbin = binvals[i].size
z = np.arange(1, zeropad[i] + 1)
binvals[i] = np.concatenate([binvals[i][0] - z[::-1] * dx, binvals[i], binvals[i][-1] + z * dx])
if len(binvals) == 1:
binvals = binvals[0]
return data, binvals
def noise_eq_bandwidth(window, axis=-1):
    """
    Calculate the noise equivalent bandwidth (NEB) of a windowing function
    as

        sqrt(window.size * window.max ** 2 / sum(window ** 2))

    See https://analog.intgckts.com/equivalent-noise-bandwidth/

    Args:
        window : float ndarray
        axis : int, axis along which to calculate NEB

    Returns
        neb : float or ndarray
            Noise equivalent bandwidth of the window
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use
    # np.float64 to keep the sum accumulating in double precision as
    # originally intended.
    return np.sqrt(window.shape[axis] * np.max(window, axis=axis)**2
                   / np.sum(window**2, dtype=np.float64, axis=axis))
def gen_filter_properties(ax='freq', horizon=1, standoff=0, min_dly=0, bl_len=None,
                          max_frate=0):
    """
    Convert standard delay and fringe-rate filtering parameters
    into uvtools.dspec.fourier_filter parameters.

    If ax == 'both', filter properties are returned as (time, freq).

    Args:
        ax : str, options = ['freq', 'time', 'both']
        horizon : float, foreground wedge horizon coefficient
        standoff : float, wedge buffer [nanosec]
        min_dly : float, the CLEAN delay window
            is never below this minimum value (in nanosec)
        bl_len : float, baseline length in seconds (i.e. meters / c);
            required when ax is 'freq' or 'both'
        max_frate : float, maximum |fringe-rate| to filter [mHz];
            must not be None when ax is 'time' or 'both'

    Returns:
        filter_centers
            list of filter centers in units of Hz or sec
        filter_half_widths
            list of filter half widths in units Hz or sec

    Raises:
        ValueError: if ax is not one of 'freq', 'time', 'both'.
        AssertionError: if a required bl_len / max_frate input is missing.
    """
    # Previously an unrecognized ax fell through to an UnboundLocalError at
    # the return; fail early with an informative message instead.
    if ax not in ('freq', 'time', 'both'):
        raise ValueError("ax must be 'freq', 'time' or 'both'; got %r" % (ax,))
    if ax == 'freq' or ax == 'both':
        filter_centers_freq = [0.]
        assert bl_len is not None, "must supply bl_len if ax=='freq' or ax=='both'!"
        # bl_dly in nanosec
        bl_dly = dspec._get_bl_dly(bl_len * 1e9, horizon=horizon, standoff=standoff, min_dly=min_dly)
        filter_half_widths_freq = [bl_dly * 1e-9]
    if ax == 'time' or ax == 'both':
        if max_frate is None:
            raise AssertionError("must supply max_frate if ax=='time' or ax=='both'!")
        filter_centers_time = [0.]
        filter_half_widths_time = [max_frate * 1e-3]  # mHz -> Hz
    if ax == 'both':
        filter_half_widths = [filter_half_widths_time, filter_half_widths_freq]
        filter_centers = [filter_centers_time, filter_centers_freq]
    elif ax == 'freq':
        filter_centers = filter_centers_freq
        filter_half_widths = filter_half_widths_freq
    else:  # ax == 'time'
        filter_centers = filter_centers_time
        filter_half_widths = filter_half_widths_time
    return filter_centers, filter_half_widths
def _trim_status(info_dict, axis, zeropad):
'''
Trims the info status dictionary for a zero-padded
filter so that the status of integrations that were
in the zero-pad region are deleted
Parameters
----------
info : dict, info dictionary
axis : integer, index of axis to trim
zeropad : integer
Returns
-------
Nothing, modifies the provided dictionary in place.
'''
# delete statuses in zero-pad region
statuses = info_dict['status']['axis_%d' % axis]
nints = len(statuses)
for i in range(zeropad):
del statuses[i]
del statuses[nints - i - 1]
# now update keys of the dict elements we wish to keep
nints = len(statuses)
for i in range(nints):
statuses[i] = statuses.pop(i + zeropad)
def _adjust_info_indices(x, info_dict, edges, freq_baseind):
    """Adjust indices in info dict to reflect rows inserted by restore_flagged_edges.

    Parameters
    ----------
    x: 2-tuple/list
        x-axis of data that has had rows/columns adjoining discontinuities removed.
    info_dict: dictionary
        info dict output by dspec.fourier_filter. Modified in place.
    edges: 2-list of lists of 2-tuples.
        list of 2-tuples indicating how many channels need to be inserted back
        within each discontinuous chunk.
    freq_baseind: int.
        index base for freq dimension
        (needed if processing spw_range that is not at lowest index of data).

    Returns
    -------
    N/A:
        modifies info-dict in place.
    """
    # Each info axis pairs with the *other* element of x / edges:
    # axis 0 uses x[1]/edges[1] (with freq_baseind), axis 1 uses x[0]/edges[0].
    per_axis = (
        (0, find_discontinuity_edges(x[1]), edges[1], freq_baseind),
        (1, find_discontinuity_edges(x[0]), edges[0], 0),
    )
    for axind, axchunks, axedges, base in per_axis:
        statuses = info_dict['status']['axis_%d' % axind]
        # Total number of entries to be re-inserted along this axis.
        offset = np.sum(np.hstack(axedges))
        # Walk chunks last-to-first, presumably so shifted keys never collide
        # with entries that have not been moved yet — confirm against callers.
        for chunk, edge in zip(axchunks[::-1], axedges[::-1]):
            offset -= edge[1]
            for ind in range(chunk[1] - 1, chunk[0] - 1, -1):
                statuses[ind + offset + base] = statuses.pop(ind)
            offset -= edge[0]
# ------------------------------------------
# Here is an argparser with core arguments
# needed for all types of xtalk and delay
# filtering.
# ------------------------------------------
def _filter_argparser():
    """
    Core Arg parser for commandline operation of hera_cal.delay_filter and hera_cal.xtalk_filter

    Parameters:
        None

    Returns:
        Argparser with core (but not complete) functionality that is called by _linear_argparser and
        _clean_argparser.
    """
    # Parse a string like "200~300,500~650" or "200 300, 500 650" into a list
    # of integer 2-tuples, e.g. [(200, 300), (500, 650)].
    def list_of_int_tuples(v):
        # Two delimiters are accepted between pair elements: '~' or whitespace.
        if '~' in v:
            v = [tuple([int(_x) for _x in x.split('~')]) for x in v.split(",")]
        else:
            v = [tuple([int(_x) for _x in x.split()]) for x in v.split(",")]
        return v
    ap = argparse.ArgumentParser(description="Perform delay filter of visibility data.")
    # I/O and core filtering arguments (used by every mode).
    ap.add_argument("datafilelist", default=None, type=str, nargs="+", help="list of data files to read in and perform filtering on.")
    ap.add_argument("--mode", type=str, default="clean", help="filtering mode to use. Can be dpss_leastsq, clean, dayenu.")
    ap.add_argument("--filetype_in", type=str, default='uvh5', help='filetype of input data files (default "uvh5")')
    ap.add_argument("--filetype_out", type=str, default='uvh5', help='filetype for output data files (default "uvh5")')
    ap.add_argument("--res_outfilename", default=None, type=str, help="path for writing the filtered visibilities with flags")
    ap.add_argument("--clobber", default=False, action="store_true", help='overwrites existing file at outfile')
    ap.add_argument("--spw_range", type=int, default=None, nargs=2, help="spectral window of data to foreground filter.")
    ap.add_argument("--tol", type=float, default=1e-9, help='Threshold for foreground and xtalk subtraction (default 1e-9)')
    ap.add_argument("--cornerturnfile", type=str, default=None, help="path to visibility data file to use as an index for baseline chunk in cornerturn."
                    "Warning: Providing this file will result in outputs with significantly different structure "
                    "then inputs. Only use it if you know what you are doing. Default is None.")
    ap.add_argument("--zeropad", default=None, type=int, help="number of bins to zeropad on both sides of FFT axis")
    ap.add_argument("--Nbls_per_load", default=None, type=int, help="the number of baselines to load at once (default None means load full data")
    ap.add_argument("--skip_wgt", type=float, default=0.1, help='skips filtering and flags times with unflagged fraction ~< skip_wgt (default 0.1)')
    ap.add_argument("--factorize_flags", default=False, action="store_true", help="Factorize flags.")
    ap.add_argument("--time_thresh", type=float, default=0.05, help="time threshold above which to completely flag channels and below which to flag times with flagged channel.")
    ap.add_argument("--calfilelist", default=None, type=str, nargs="+", help="list of calibration files.")
    ap.add_argument("--CLEAN_outfilename", default=None, type=str, help="path for writing the filtered model visibilities (with the same flags)")
    ap.add_argument("--filled_outfilename", default=None, type=str, help="path for writing the original data but with flags unflagged and replaced with filtered models wherever possible")
    ap.add_argument("--external_flags", default=None, type=str, nargs="+", help="path(s) to external flag files that you wish to apply.")
    ap.add_argument("--overwrite_flags", default=False, action="store_true", help="overwrite existing flags.")
    ap.add_argument("--flag_yaml", default=None, type=str, help="path to a flagging yaml containing apriori antenna, freq, and time flags.")
    ap.add_argument("--polarizations", default=None, type=str, nargs="+", help="list of polarizations to filter.")
    ap.add_argument("--verbose", default=False, action="store_true", help="Lots of text.")
    ap.add_argument("--filter_spw_ranges", default=None, type=list_of_int_tuples, help="List of spw channel selections to filter independently. Two acceptable formats are "
                    "Ex1: '200~300,500~650' --> [(200, 300), (500, 650), ...] and "
                    "Ex2: '200 300, 500 650' --> [(200, 300), (500, 650), ...]")
    # clean arguments.
    clean_options = ap.add_argument_group(title='Options for CLEAN (arguments only used if mode=="clean"!)')
    clean_options.add_argument("--window", type=str, default='blackman-harris', help='window function for frequency filtering (default "blackman-harris",\
                               see uvtools.dspec.gen_window for options')
    clean_options.add_argument("--maxiter", type=int, default=100, help='maximum iterations for aipy.deconv.clean to converge (default 100)')
    clean_options.add_argument("--edgecut_low", default=0, type=int, help="Number of channels to flag on lower band edge and exclude from window function.")
    clean_options.add_argument("--edgecut_hi", default=0, type=int, help="Number of channels to flag on upper band edge and exclude from window function.")
    clean_options.add_argument("--gain", type=float, default=0.1, help="Fraction of residual to use in each iteration.")
    clean_options.add_argument("--alpha", type=float, default=.5, help="If window='tukey', use this alpha parameter (default .5).")
    # Filter-matrix caching options (linear modes only).
    cache_options = ap.add_argument_group(title='Options for caching (arguments only used if mode!="clean")')
    cache_options.add_argument("--write_cache", default=False, action="store_true", help="if True, writes newly computed filter matrices to cache.")
    cache_options.add_argument("--cache_dir", type=str, default=None, help="directory to store cached filtering matrices in.")
    cache_options.add_argument("--read_cache", default=False, action="store_true", help="If true, read in cache files in directory specified by cache_dir.")
    linear_options = ap.add_argument_group(title="Options for linear filtering (dayenu and dpss_leastsq)")
    linear_options.add_argument("--max_contiguous_edge_flags", type=int, default=1, help="Skip integrations with at least this number of contiguous edge flags.")
    return ap
def time_chunk_from_baseline_chunks(time_chunk_template, baseline_chunk_files, outfilename, clobber=False, time_bounds=False):
"""Combine multiple waterfall files (with disjoint baseline sets) into time-limited file with all baselines.
The methods delay_filter.load_delay_filter_and_write_baseline_list and
| |
?, ?, ?)''',
(user_id,
None,
engine_workflow.expiration_date,
name,
constants.WORKFLOW_NOT_STARTED,
datetime.now(),
engine_workflow.queue))
engine_workflow.wf_id = cursor.lastrowid
# the transfers must be registered before the jobs
for transfer in six.itervalues(
engine_workflow.transfer_mapping):
transfer.workflow_id = engine_workflow.wf_id
self.add_transfer(transfer,
user_id,
engine_workflow.expiration_date,
external_cursor=cursor)
if isinstance(transfer, FileTransfer):
engine_workflow.registered_tr[
transfer.transfer_id] = transfer
else:
engine_workflow.registered_tmp[
transfer.temp_path_id] = transfer
job_info = []
if login is None:
login = self.get_user_login(cursor)
for job in six.itervalues(engine_workflow.job_mapping):
job.workflow_id = engine_workflow.wf_id
job = self.add_job(user_id,
job,
engine_workflow.expiration_date,
external_cursor=cursor,
login=login)
job_info.append(
(job.job_id, job.stdout_file, job.stderr_file))
engine_workflow.registered_jobs[job.job_id] = job
pickled_workflow = pickle.dumps(engine_workflow,
protocol=DB_PICKLE_PROTOCOL)
cursor.execute('''UPDATE workflows
SET pickled_engine_workflow=?
WHERE id=?''',
(sqlite3.Binary(pickled_workflow),
engine_workflow.wf_id))
for dest_job, links \
in six.iteritems(engine_workflow.param_links):
edest_job = engine_workflow.job_mapping[dest_job]
for dest_param, linkl in six.iteritems(links):
for link in linkl:
esrc_job = engine_workflow.job_mapping[link[0]]
func = None
if len(link) > 2:
func = sqlite3.Binary(pickle.dumps(link[2]))
cursor.execute(
'''INSERT INTO param_links
(workflow_id,
dest_job_id,
dest_param,
src_job_id,
src_param,
pickled_function)
VALUES (?, ?, ?, ?, ?, ?)''',
(engine_workflow.wf_id, edest_job.job_id,
dest_param, esrc_job.job_id, link[1], func))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
self.logger.debug("==>end of add_workflow")
return engine_workflow
def delete_workflow(self, wf_id):
'''
Remove the workflow from the database. Remove all associated jobs and transfers.
Parameters
----------
wf_id: int
'''
self.logger.debug("=> delete_workflow")
self.logger.debug("wf_id is: " + str(wf_id))
with self._lock:
# set expiration date to yesterday + clean() ?
connection = self._connect()
cursor = connection.cursor()
yesterday = date.today() - timedelta(days=1)
try:
cursor.execute(
'UPDATE workflows SET expiration_date=? WHERE id=?', (yesterday, wf_id))
cursor.execute(
'UPDATE jobs SET expiration_date=? WHERE workflow_id=?', (yesterday, wf_id))
cursor.execute(
'UPDATE transfers SET expiration_date=? WHERE workflow_id=?', (yesterday, wf_id))
cursor.execute(
'UPDATE temporary_paths SET expiration_date=? WHERE workflow_id=?', (yesterday, wf_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
self.vacuum()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
cursor.close()
connection.commit()
connection.close()
self.clean()
self.vacuum()
def change_workflow_expiration_date(self, wf_id, new_date, user_id):
'''
Change the workflow expiration date.
@type wf_id: int
@type new_date: datetime.datetime
'''
self.logger.debug("=> change_workflow_expiration_date")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_workflow(connection, cursor, wf_id, user_id)
try:
cursor.execute(
'UPDATE workflows SET expiration_date=? WHERE id=?', (new_date, wf_id))
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def get_engine_workflow(self, wf_id, user_id):
'''
Returns a EngineWorkflow object.
The wf_id must be valid.
Parameters
----------
wf_id: int
Returns
-------
engine: EngineWorkflow
workflow object
'''
self.logger.debug("=> get_engine_workflow")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_workflow(connection, cursor, wf_id, user_id)
try:
pickled_workflow = six.next(cursor.execute(
'''SELECT
pickled_engine_workflow
FROM workflows WHERE id=?''',
[wf_id]))[0]
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
cursor.close()
connection.close()
if pickled_workflow:
if six.PY2:
workflow = pickle.loads(pickled_workflow)
else:
workflow = pickle.loads(pickled_workflow, encoding='utf-8')
else:
workflow = None
return workflow
def set_workflow_status(self, wf_id, status, force=False):
'''
Updates the workflow status in the database.
The status must be valid (ie a string among the workflow status
string defined in constants.WORKFLOW_STATUS)
Parameters
----------
wf_id: int
status: str
workflow status as defined in constants.WORKFLOW_STATUS
'''
self.logger.debug("=> set_workflow_status, wf_id: %s, status: %s"
% (wf_id, status))
with self._lock:
# TBI if the status is not valid raise an exception ??
connection = self._connect()
cursor = connection.cursor()
try:
prev_status = six.next(cursor.execute(
'''SELECT status
FROM workflows WHERE id=?''',
[wf_id]))[0]
prev_status = self._string_conversion(prev_status)
if force or \
(prev_status != constants.DELETE_PENDING and
prev_status != constants.KILL_PENDING):
cursor.execute('''UPDATE workflows
SET status=?,
last_status_update=?
WHERE id=?''',
(status,
datetime.now(),
wf_id))
self.logger.debug("===> workflow_status updated")
else:
self.logger.debug("===> (workflow_status not updated)")
except Exception as e:
connection.rollback()
cursor.close()
connection.close()
self.logger.error(
"===> workflow_status update failed, error: %s, : %s"
% (str(type(e)), str(e)))
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
connection.commit()
cursor.close()
connection.close()
def get_workflow_status(self, wf_id, user_id):
'''
Returns the workflow status stored in the database
(updated by L{DrmaaWorkflowEngine}) and the date of its last update.
'''
self.logger.debug("=> get_workflow_status, wf_id: %s, user_id: %s"
% (wf_id, user_id))
with self._lock:
connection = self._connect()
cursor = connection.cursor()
self._check_workflow(connection, cursor, wf_id, user_id)
try:
(status, strdate) = six.next(cursor.execute(
'''SELECT status, last_status_update
FROM workflows WHERE id=?''',
[wf_id]))
except Exception as e:
self.logger.exception("In get_workflow_status")
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
status = self._string_conversion(status)
date = self._str_to_date_conversion(strdate)
cursor.close()
connection.close()
self.logger.debug("===> status: %s, date: %s" % (status, strdate))
self.logger.debug("===> status: %s, date: %s" % (status, repr(date)))
return status, date
    def get_detailed_workflow_status(self, wf_id, check_status=False,
                                     with_drms_id=True):
        '''
        Gets back the status of all the workflow elements at once, minimizing
        the requests to the database.

        Parameters
        ----------
        wf_id: int
        check_status: bool (optional, default=False)
            if True, check that a workflow with status RUNNING has actually
            some running or pending jobs. If not, set the state to DONE. It
            should not happen, and if it does, it's a bug (which has actually
            happened in Soma-Workflow <= 2.8.0)
        with_drms_id: bool (optional, default=True)
            if True the DRMS id (drmaa_id) is also included in the returned
            tuple for each job. This info has been added in soma_workflow 3.0
            and is thus optional to avoid breaking compatibility with earlier
            versions.

        Returns
        -------
        tuple (sequence of tuple (job_id,
                                  status,
                                  queue,
                                  exit_info,
                                  (submission_date,
                                   execution_date,
                                   ending_date,
                                   queue),
                                  [drmaa_id]),
               sequence of tuple (transfer_id,
                                  engine_file_path,
                                  client_file_path,
                                  client_paths,
                                  status,
                                  transfer_type),
               workflow_status,
               workflow_queue,
               sequence of tuple (temp_path_id,
                                  engine_path,
                                  status),
               )
        '''
        self.logger.debug("=> get_detailed_workflow_status, wf_id: %s" % wf_id)
        with self._lock:
            connection = self._connect()
            cursor = connection.cursor()
            try:
                # workflow status
                (wf_status, wf_queue) = six.next(cursor.execute(
                    '''SELECT
                    status,
                    queue
                    FROM workflows WHERE id=?''',
                    [wf_id]))  # supposes that the wf_id is valid

                # result accumulator: (jobs, transfers, status, queue, temps)
                workflow_status = ([], [], wf_status, wf_queue, [])
                # jobs
                for row in cursor.execute('''SELECT id,
                                          status,
                                          exit_status,
                                          exit_value,
                                          terminating_signal,
                                          resource_usage,
                                          submission_date,
                                          execution_date,
                                          ending_date,
                                          queue,
                                          drmaa_id
                                          FROM jobs WHERE workflow_id=?''',
                                          [wf_id]):
                    job_id, status, exit_status, exit_value, term_signal, \
                        resource_usage, submission_date, execution_date, \
                        ending_date, queue, drmaa_id = row

                    submission_date = self._str_to_date_conversion(
                        submission_date)
                    execution_date = self._str_to_date_conversion(
                        execution_date)
                    ending_date = self._str_to_date_conversion(ending_date)
                    queue = self._string_conversion(queue)

                    # NOTE(review): 'queue' appears both as the 3rd element
                    # and again inside the dates tuple; kept for
                    # compatibility with existing consumers.
                    if with_drms_id:
                        workflow_status[0].append(
                            (job_id, status, queue,
                             (exit_status, exit_value, term_signal,
                              resource_usage),
                             (submission_date, execution_date, ending_date,
                              queue),
                             drmaa_id))
                    else:
                        workflow_status[0].append(
                            (job_id, status, queue,
                             (exit_status, exit_value, term_signal,
                              resource_usage),
                             (submission_date, execution_date, ending_date,
                              queue)))

                # transfers
                for row in cursor.execute('''SELECT id,
                                          engine_file_path,
                                          client_file_path,
                                          client_paths,
                                          status,
                                          transfer_type
                                          FROM transfers WHERE workflow_id=?''',
                                          [wf_id]):
                    (transfer_id,
                     engine_file_path,
                     client_file_path,
                     client_paths,
                     status,
                     transfer_type) = row
                    engine_file_path = self._string_conversion(
                        engine_file_path)
                    client_file_path = self._string_conversion(
                        client_file_path)
                    status = self._string_conversion(status)
                    transfer_type = self._string_conversion(transfer_type)
                    # client_paths is stored as a single string joined with
                    # file_separator; split it back to a list (or None)
                    if client_paths:
                        client_paths = self._string_conversion(
                            client_paths).split(file_separator)
                    else:
                        client_paths = None

                    workflow_status[1].append((transfer_id,
                                               engine_file_path,
                                               client_file_path,
                                               client_paths,
                                               status,
                                               transfer_type))

                # temporary_paths
                for row in cursor.execute('''SELECT temp_path_id,
                                          engine_file_path,
                                          status
                                          FROM temporary_paths WHERE workflow_id=?''',
                                          [wf_id]):
                    (temp_path_id,
                     engine_file_path,
                     status) = row
                    engine_file_path = self._string_conversion(
                        engine_file_path)
                    status = self._string_conversion(status)
                    workflow_status[4].append((temp_path_id,
                                               engine_file_path,
                                               status))
            except Exception as e:
                # release DB resources before re-raising as DatabaseError
                cursor.close()
                connection.close()
                six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
            cursor.close()
            connection.close()
            self.logger.debug("===> status: %s, queue: %s" % (wf_status, wf_queue))
            # safety net: a RUNNING workflow with no unfinished jobs is
            # inconsistent -- force it to DONE (see docstring)
            if check_status and wf_status == constants.WORKFLOW_IN_PROGRESS:
                done = []
                not_done = []
                for job_status in workflow_status[0]:
                    if job_status[1] in (constants.DONE, constants.FAILED):
                        done.append(job_status[0])
                    else:
                        not_done.append(job_status[0])
                self.logger.debug("===> ended jobs: %d, not ended: %d"
                                  % (len(done), len(not_done)))
                if len(not_done) == 0:
                    self.logger.warning("=> Workflow status error: is RUNNING "
                                        "with no jobs left to be processed")
                    self.logger.warning("=> fixing workflow status")
                    self.set_workflow_status(wf_id, constants.WORKFLOW_DONE, True)

            return workflow_status
#
# JOBS
    def _check_job(self, connection, cursor, job_id, user_id):
        # Convenience wrapper: validate a single job id for this user.
        return self._check_jobs(connection, cursor, [job_id], user_id)
def _check_jobs(self, connection, cursor, job_ids, user_id):
maxv = sqlite3_max_variable_number()
nmax = maxv
if maxv == 0:
nmax = len(job_ids)
if nmax == 0:
nmax = 1
nchunks = int(math.ceil(float(len(job_ids)) / nmax))
for chunk in range(nchunks):
if chunk < nchunks - 1:
n = nmax
else:
n = len(job_ids) - chunk * nmax
job_str = ','.join(['?'] * n)
try:
sel = cursor.execute(
'''SELECT id FROM jobs WHERE id IN (%s) and user_id=?'''
% job_str,
job_ids[chunk * nmax:chunk * nmax + n] + [user_id])
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
ids = set()
for job_id in sel:
ids.add(int(job_id[0]))
if len(ids) != len(job_ids):
missing = [j for j in job_ids if j not in ids]
raise UnknownObjectError(
"The job ids " + ','.join([str(j) for j in missing])
+ " are not valid or do not belong to the user "
+ repr(user_id))
return True
def is_valid_job(self, job_id, user_id):
self.logger.debug("=> is_valid_job")
with self._lock:
connection = self._connect()
cursor = connection.cursor()
last_status_update = None
try:
sel = cursor.execute(
'''SELECT last_status_update
FROM jobs
WHERE id=?''',
[job_id])
last_status_update = six.next(sel)[0]
count = 1
except StopIteration:
count = 0
except Exception as e:
cursor.close()
connection.close()
six.reraise(DatabaseError, DatabaseError(e), sys.exc_info()[2])
cursor.close()
connection.close()
last_status_update = self._str_to_date_conversion(
last_status_update)
return (count != 0, | |
)
"""
Input graph:
input(shape=10,2,3)--->transpose(axis=[0,2,1])----->relu---->transpose(axis=[0,2,1])---->out1
|
|
--->reduce(axis=2)----->log---->transpose(axis=[0,2,1])---->out2
Output graph:
input(shape=10,2,3)----->relu---->out1
|
|
--->reduce(axis=1)----->log---->out2
"""
def test_fusion_with_axis_op(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(10, 2, 3))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 2, 1])
x1 = mb.relu(x=x)
x2 = mb.reduce_mean(x=x, axes=[2], keep_dims=True)
y1 = mb.transpose(x=x1, perm=[0, 2, 1])
x3 = mb.log(x=x2)
y2 = mb.transpose(x=x3, perm=[0, 2, 1])
return y1, y2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "relu", "reduce_mean", "transpose", "log", "transpose"],
)
self.assertEqual(get_op_types_in_program(prog), ["relu", "reduce_mean", "log"])
assert list(block.find_ops(op_type="reduce_mean")[0].inputs["axes"].val) == [1]
assert_model_is_valid(
prog,
{"x": (10, 2, 3)},
expected_output_shapes={
block.outputs[0].name: (10, 2, 3),
block.outputs[1].name: (10, 1, 3),
},
)
"""
Input graph:
input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])---
|
|
--->pad(pad=[0,0,0,0,1,2,3,4])
|
|-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6)
Output graph:
same as input graph, as transpose cannot be pushed through the pad op since "reflect" mode is only supported
along the last two axis
"""
def test_fusion_with_pad_reflective_op_0(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 3, 1, 2])
x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect")
x3 = mb.log(x=x2)
y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1])
return y2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"]
)
self.assertEqual(
get_op_types_in_program(prog), ["transpose", "pad", "log", "transpose"]
)
assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [
0,
0,
0,
0,
1,
2,
3,
4,
]
assert_model_is_valid(
prog,
{"x": (11, 2, 3, 6)},
expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)},
)
"""
Input graph:
input(shape=11,2,3,6)--->transpose(axis=[0,1,3,2])---
|
|
--->pad(pad=[0,0,0,0,1,2,3,4])
|
|-->log--->transpose(axis=[0,1,3,2])-->out1(shape=11,2,10,9)
Output graph:
input(shape=11,2,3,6)--->pad(pad=[0,0,0,0,3,4,1,2])-->log-->out1(shape=11,2,10,9)
"""
def test_fusion_with_pad_reflective_op_1(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 1, 3, 2])
x2 = mb.pad(x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="reflect")
x3 = mb.log(x=x2)
y2 = mb.transpose(x=x3, perm=[0, 1, 3, 2])
return y2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"]
)
self.assertEqual(get_op_types_in_program(prog), ["pad", "log"])
assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [
0,
0,
0,
0,
3,
4,
1,
2,
]
assert_model_is_valid(
prog,
{"x": (11, 2, 3, 6)},
expected_output_shapes={block.outputs[0].name: (11, 2, 10, 9)},
)
"""
Input graph:
input(shape=11,2,3,6)--->transpose(axis=[0,3,1,2])---
|
|
--->pad(pad=[0,0,0,0,1,2,3,4])
|
|-->log--->transpose(axis=[0,2,3,1])-->out1(shape=11,5,10,6)
Output graph:
input(shape=11,2,3,6)--->pad(pad=[0,0,1,2,3,4,0,0])-->log-->out1(shape=11,5,10,6)
"""
def test_fusion_with_pad_constant_op(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(11, 2, 3, 6))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 3, 1, 2])
x2 = mb.pad(
x=x, pad=[0, 0, 0, 0, 1, 2, 3, 4], mode="constant", constant_val=3.0
)
x3 = mb.log(x=x2)
y2 = mb.transpose(x=x3, perm=[0, 2, 3, 1])
return y2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog), ["transpose", "pad", "log", "transpose"]
)
self.assertEqual(get_op_types_in_program(prog), ["pad", "log"])
assert list(block.find_ops(op_type="pad")[0].inputs["pad"].val.flatten()) == [
0,
0,
1,
2,
3,
4,
0,
0,
]
assert_model_is_valid(
prog,
{"x": (11, 2, 3, 6)},
expected_output_shapes={block.outputs[0].name: (11, 5, 10, 6)},
)
"""
Input graph:
const(shape=2)
|
V
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5)
Output graph:
const(shape=1,2,1,1)
|
V
input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5)
"""
def test_fusion_with_add_constant_op(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 2, 3, 1])
x = mb.add(x=x, y=np.array([10, 100]))
x = mb.transpose(x=x, perm=[0, 3, 1, 2])
return x
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]
)
self.assertEqual(get_op_types_in_program(prog), ["add"])
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)},
)
"""
Input graph:
const(scalar)
|
V
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5)
Output graph:
const(scalar)
|
V
input(shape=1,2,5,5)--->add--->out(shape=1,2,5,5)
"""
def test_fusion_with_add_scalar_constant_op(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x = mb.transpose(x=x, perm=[0, 2, 3, 1])
x = mb.add(x=5, y=x)
x = mb.transpose(x=x, perm=[0, 3, 1, 2])
return x
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog), ["transpose", "add", "transpose"]
)
self.assertEqual(get_op_types_in_program(prog), ["add"])
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)},
)
"""
Input graph:
input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5)
| ^
| |
|---->relu---->transpose(axis=[0,2,3,1])
Output graph:
input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5)
| ^
| |
|------>relu
"""
def test_fusion_with_add_broadcastable_0(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.relu(x=x)
x2 = mb.transpose(x=x2, perm=[0, 2, 3, 1])
x3 = mb.add(x=x1, y=x2)
y = mb.transpose(x=x3, perm=[0, 3, 1, 2])
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "relu", "transpose", "add", "transpose"],
)
self.assertEqual(get_op_types_in_program(prog), ["relu", "add"])
assert block.find_ops(op_type="relu")[0].inputs["x"] == block.inputs["x"]
assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"]
assert (
block.find_ops(op_type="add")[0].inputs["y"]
== block.find_ops(op_type="relu")[0].outputs[0]
)
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)},
)
"""
Input graph:
input(shape=1,2,5,5)----->transpose(axis=[0,2,3,1])--->add---->transpose(axis=[0,3,1,2])--->out(shape=1,2,5,5)
| ^
| |
|----------------------->transpose(axis=[0,2,3,1])
Output graph:
input(shape=1,2,5,5)----->add--->out(shape=1,2,5,5)
| ^
| |
|---------
"""
def test_fusion_with_add_broadcastable_1(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x3 = mb.add(x=x1, y=x2)
y = mb.transpose(x=x3, perm=[0, 3, 1, 2])
return y
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "transpose", "add", "transpose"],
)
self.assertEqual(get_op_types_in_program(prog), ["add"])
assert block.find_ops(op_type="add")[0].inputs["x"] == block.inputs["x"]
assert block.find_ops(op_type="add")[0].inputs["y"] == block.inputs["x"]
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 2, 5, 5)},
)
"""
Input graph:
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
| ^
| |
|->transpose(axis=[0,2,3,1])--->relu------------
Output graph:
input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5)
| ^
| |
|---->relu------------
"""
def test_concat_pattern_0(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.relu(x=x1)
x2 = mb.relu(x=x2)
x3 = mb.concat(values=[x1, x2], axis=3)
x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
return x4
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "transpose", "relu", "relu", "concat", "transpose"],
)
self.assertEqual(get_op_types_in_program(prog), ["relu", "relu", "concat"])
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={block.outputs[0].name: (1, 4, 5, 5)},
)
"""
Input graph:
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
| ^
| |
|->transpose(axis=[0,2,3,1])------->relu--------
|
V
pool--->out2(shape=1,5,5,2)
Output graph:
input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5)
| ^
| |
|---->relu------------
|
|--->transpose(axis=[0,2,3,1])---->pool--->out2(shape=1,5,5,2)
"""
def test_concat_pattern_1(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.relu(x=x1)
x2 = mb.relu(x=x2)
x3 = mb.concat(values=[x1, x2], axis=3)
x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
x5 = mb.avg_pool(
x=x2, kernel_sizes=[1, 1], strides=[1, 1], pad_type="valid"
)
return x4, x5
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
[
"transpose",
"transpose",
"relu",
"relu",
"concat",
"transpose",
"avg_pool",
],
)
self.assertEqual(
get_op_types_in_program(prog),
["relu", "relu", "concat", "transpose", "avg_pool"],
)
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={
block.outputs[0].name: (1, 4, 5, 5),
block.outputs[1].name: (1, 5, 5, 2),
},
)
"""
Input graph:
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
| ^
| |
|->transpose(axis=[0,2,3,1])------->relu--------
|
V
relu--->out2(shape=1,5,5,2)
Output graph:
input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5)
| ^
| |
|---->relu------------
|
|--->relu---->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2)
"""
def test_concat_pattern_2(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.relu(x=x1)
x2 = mb.relu(x=x2)
x3 = mb.concat(values=[x1, x2], axis=3)
x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
x5 = mb.relu(x=x2)
return x4, x5
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "transpose", "relu", "relu", "concat", "transpose", "relu"],
)
self.assertEqual(
get_op_types_in_program(prog),
["relu", "relu", "concat", "relu", "transpose"],
)
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={
block.outputs[0].name: (1, 4, 5, 5),
block.outputs[1].name: (1, 5, 5, 2),
},
)
"""
Input graph:
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
| ^
| |
|->transpose(axis=[0,2,3,1])------->relu--------
|
V
out2(shape=1,5,5,2)
Output graph:
input(shape=1,2,5,5)------> relu---->concat(axis=1)--->out1(shape=1,4,5,5)
| ^
| |
|---->relu------------
|
|--->transpose(axis=[0,2,3,1])---->out2(shape=1,5,5,2)
"""
def test_concat_pattern_3(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.relu(x=x1)
x2 = mb.relu(x=x2)
x3 = mb.concat(values=[x1, x2], axis=3)
x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
return x4, x2
prev_prog, prev_block, block = apply_pass_and_basic_check(
prog, "common::reduce_transposes"
)
self.assertEqual(
get_op_types_in_program(prev_prog),
["transpose", "transpose", "relu", "relu", "concat", "transpose"],
)
self.assertEqual(
get_op_types_in_program(prog), ["relu", "relu", "concat", "transpose"]
)
assert_model_is_valid(
prog,
{"x": (1, 2, 5, 5)},
expected_output_shapes={
block.outputs[0].name: (1, 4, 5, 5),
block.outputs[1].name: (1, 5, 5, 2),
},
)
"""
Input graph:
input(shape=1,2,5,5)--->transpose(axis=[0,2,3,1])---> relu---->concat(axis=3)----->transpose(axis=[0,3,1,2])----->out1(shape=1,4,5,5)
| ^
| |
|->transpose(axis=[0,2,3,1])------->relu--------
|
V
transpose(axis=[0,3,1,2]) -----> out2(shape=1,2,5,5)
Output graph:
input(shape=1,2,5,5)---> relu---->concat(axis=1)----->out1(shape=1,4,5,5)
| ^
| |
|------------------->relu-------->out2(shape=1,2,5,5)
"""
def test_concat_pattern_4(self):
@mb.program(input_specs=[mb.TensorSpec(shape=(1, 2, 5, 5))])
def prog(x):
x1 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x2 = mb.transpose(x=x, perm=[0, 2, 3, 1])
x1 = mb.relu(x=x1)
x2 = mb.relu(x=x2)
x3 = mb.concat(values=[x1, x2], axis=3)
x4 = mb.transpose(x=x3, perm=[0, 3, 1, 2])
x5 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
return x4, x5
prev_prog, prev_block, block = | |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
import pytest
from _pytest.outcomes import Skipped
import os
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData, UVCal, utils as uvutils
from pyuvdata.data import DATA_PATH
from pyuvdata import UVFlag
from ..uvflag import lst_from_uv, flags2waterfall, and_rows_cols
from pyuvdata import __version__
import shutil
import copy
import warnings
import h5py
import pathlib
# Paths to the test data files shipped with pyuvdata.
test_d_file = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5")
test_c_file = os.path.join(DATA_PATH, "zen.2457555.42443.HH.uvcA.omni.calfits")
# Companion UVFlag file derived from the UVData test file. Use slicing rather
# than str.rstrip(".uvh5"): rstrip strips a *character set*, not a suffix, and
# only worked here by accident ('A' is not in the set, stopping the strip).
test_f_file = test_d_file[: -len(".uvh5")] + ".testuvflag.h5"

pyuvdata_version_str = " Read/written with pyuvdata version: " + __version__ + "."

# Warnings raised by the HERA test files; ignored for every test in the module.
pytestmark = pytest.mark.filterwarnings(
    "ignore:telescope_location is not set. Using known values for HERA.",
    "ignore:antenna_positions is not set. Using known values for HERA.",
)
@pytest.fixture(scope="session")
def uvdata_obj_main():
    """Session-scoped UVData object read once from the test data file."""
    uv = UVData()
    uv.read(test_d_file)

    yield uv

    # cleanup
    del uv
    return
@pytest.fixture(scope="function")
def uvdata_obj(uvdata_obj_main):
    """Fresh per-test copy of the session-scoped UVData object."""
    uv_copy = uvdata_obj_main.copy()

    yield uv_copy

    # cleanup
    del uv_copy
    return
# The following three fixtures are used regularly
# to initizize UVFlag objects from standard files
# We need to define these here in order to set up
# some skips for developers who do not have `pytest-cases` installed
@pytest.fixture(scope="function")
def uvf_from_data(uvdata_obj):
    """Baseline-type UVFlag object initialized from a UVData object."""
    flag_obj = UVFlag()
    flag_obj.from_uvdata(uvdata_obj)

    # yield the object for the test
    yield flag_obj

    # do some cleanup
    del (flag_obj, uvdata_obj)
@pytest.fixture(scope="function")
def uvf_from_uvcal():
    """Antenna-type UVFlag object initialized from a UVCal object."""
    cal = UVCal()
    cal.read_calfits(test_c_file)
    flag_obj = UVFlag()
    flag_obj.from_uvcal(cal)

    # the antenna type test file is large, so downselect to speed up
    if flag_obj.type == "antenna":
        flag_obj.select(antenna_nums=flag_obj.ant_array[:5])

    # yield the object for the test
    yield flag_obj

    # do some cleanup
    del (flag_obj, cal)
@pytest.fixture(scope="function")
def uvf_from_waterfall(uvdata_obj):
    """Waterfall-type UVFlag object initialized from a UVData object."""
    flag_obj = UVFlag()
    flag_obj.from_uvdata(uvdata_obj, waterfall=True)

    # yield the object for the test
    yield flag_obj

    # do some cleanup
    del flag_obj
# Try to import `pytest-cases` and define decorators used to
# iterate over the three main types of UVFlag objects
# otherwise make the decorators skip the tests that use these iterators
try:
    # importorskip raises _pytest.outcomes.Skipped (not ImportError) when the
    # package is missing or too old; caught below to degrade gracefully.
    pytest_cases = pytest.importorskip("pytest_cases", minversion="1.12.1")

    # parametrizes a test over all three fixture-backed UVFlag flavors
    cases_decorator = pytest_cases.parametrize(
        "input_uvf",
        [
            pytest_cases.fixture_ref(uvf_from_data),
            pytest_cases.fixture_ref(uvf_from_uvcal),
            pytest_cases.fixture_ref(uvf_from_waterfall),
        ],
    )

    # same, minus the waterfall flavor, for tests that cannot handle it
    cases_decorator_no_waterfall = pytest_cases.parametrize(
        "input_uvf",
        [
            pytest_cases.fixture_ref(uvf_from_data),
            pytest_cases.fixture_ref(uvf_from_uvcal),
        ],
    )

    # This warning is raised by pytest_cases
    # It is due to a feature the developer does
    # not know how to handle yet. ignore for now.
    warnings.filterwarnings(
        "ignore",
        message="WARNING the new order is not" + " taken into account !!",
        append=True,
    )
except Skipped:
    # pytest-cases unavailable: replace the decorators with unconditional
    # skip markers so dependent tests are skipped rather than erroring out
    cases_decorator = pytest.mark.skipif(
        True, reason="pytest-cases not installed or not required version"
    )
    cases_decorator_no_waterfall = pytest.mark.skipif(
        True, reason="pytest-cases not installed or not required version"
    )
@pytest.fixture()
def test_outfile(tmp_path):
    """Temporary output-file path for write/read round-trip tests."""
    outpath = tmp_path / "outtest_uvflag.h5"
    yield str(outpath)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check_flag_array(uvdata_obj):
    """check() must reject a flag_array whose dtype is not boolean."""
    flag_obj = UVFlag()
    flag_obj.from_uvdata(uvdata_obj, mode="flag")

    # corrupt the flag array with an integer dtype
    flag_obj.flag_array = np.ones((flag_obj.flag_array.shape), dtype=int)

    with pytest.raises(
        ValueError, match="UVParameter _flag_array is not the appropriate type.",
    ):
        flag_obj.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_bad_mode(uvdata_obj):
    """An unrecognized ``mode`` must be rejected for UVData and UVCal input."""
    cal = UVCal()
    cal.read_calfits(test_c_file)
    for indata in (uvdata_obj, cal):
        with pytest.raises(ValueError) as err:
            UVFlag(indata, mode="bad_mode", history="I made a UVFlag object",
                   label="test")
        assert str(err.value).startswith("Input mode must be within acceptable")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata(uvdata_obj):
    """A metric-mode UVFlag built from UVData mirrors the UVData metadata."""
    uv = uvdata_obj
    uvf = UVFlag(uv, history="I made a UVFlag object", label="test")

    # metric/weights arrays initialized to 0 / 1 with the data shape
    assert uvf.metric_array.shape == uv.flag_array.shape
    assert not np.any(uvf.metric_array)
    assert uvf.weights_array.shape == uv.flag_array.shape
    assert np.all(uvf.weights_array == 1)

    assert uvf.type == "baseline"
    assert uvf.mode == "metric"

    # metadata copied straight from the UVData object
    for attr in ("time_array", "lst_array", "polarization_array",
                 "baseline_array", "ant_1_array", "ant_2_array"):
        assert np.all(getattr(uvf, attr) == getattr(uv, attr))
    assert np.all(uvf.freq_array == uv.freq_array[0])

    # history and bookkeeping
    assert "I made a UVFlag object" in uvf.history
    assert 'Flag object with type "baseline"' in uvf.history
    assert pyuvdata_version_str in uvf.history
    assert uvf.label == "test"
    assert uvf.filename == uv.filename
def test_add_extra_keywords(uvdata_obj):
    """extra_keywords set on a UVFlag object behave like a plain dict."""
    uvf = UVFlag(uvdata_obj, history="I made a UVFlag object", label="test")
    uvf.extra_keywords = {"keyword1": 1, "keyword2": 2}
    uvf.extra_keywords["keyword3"] = 3

    for key, expected in (("keyword1", 1), ("keyword2", 2), ("keyword3", 3)):
        assert key in uvf.extra_keywords
        assert uvf.extra_keywords.get(key) == expected
def test_read_extra_keywords(uvdata_obj):
    """extra_keywords present on the UVData input propagate to the UVFlag."""
    uv = uvdata_obj
    uv.extra_keywords = {"keyword1": 1, "keyword2": 2}
    assert "keyword1" in uv.extra_keywords
    assert "keyword2" in uv.extra_keywords

    uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
    for key in ("keyword1", "keyword2"):
        assert key in uvf.extra_keywords
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_x_orientation(uvdata_obj):
    """x_orientation is carried over from the UVData object."""
    uvdata_obj.x_orientation = "east"
    uvf = UVFlag(uvdata_obj, history="I made a UVFlag object", label="test")
    assert uvf.x_orientation == uvdata_obj.x_orientation
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_uvdata_copy_flags(uvdata_obj, future_shapes):
    """copy_flags=True warns, forces flag mode and copies the UVData flags."""
    uv = uvdata_obj
    if future_shapes:
        uv.use_future_array_shapes()

    with uvtest.check_warnings(UserWarning, 'Copying flags to type=="baseline"'):
        uvf = UVFlag(uv, copy_flags=True, mode="metric")

    # in flag mode the metric array must exist but stay unset
    assert hasattr(uvf, "metric_array")
    assert uvf.metric_array is None
    assert uvf.weights_array is None

    if future_shapes:
        assert np.array_equal(uvf.flag_array[:, 0, :, :], uv.flag_array)
        assert np.all(uvf.freq_array == uv.freq_array)
    else:
        assert np.array_equal(uvf.flag_array, uv.flag_array)
        assert np.all(uvf.freq_array == uv.freq_array[0])

    assert uvf.type == "baseline"
    assert uvf.mode == "flag"

    for attr in ("time_array", "lst_array", "polarization_array",
                 "baseline_array", "ant_1_array", "ant_2_array"):
        assert np.all(getattr(uvf, attr) == getattr(uv, attr))

    assert 'Flag object with type "baseline"' in uvf.history
    assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_mode_flag(uvdata_obj):
    """from_uvdata with mode='flag' copies flags and leaves metrics unset."""
    uv = uvdata_obj
    uvf = UVFlag()
    uvf.from_uvdata(uv, copy_flags=False, mode="flag")

    # in flag mode the metric array must exist but stay unset
    assert hasattr(uvf, "metric_array")
    assert uvf.metric_array is None
    assert uvf.weights_array is None
    assert np.array_equal(uvf.flag_array, uv.flag_array)

    assert uvf.type == "baseline"
    assert uvf.mode == "flag"

    for attr in ("time_array", "lst_array", "polarization_array",
                 "baseline_array", "ant_1_array", "ant_2_array"):
        assert np.all(getattr(uvf, attr) == getattr(uv, attr))
    assert np.all(uvf.freq_array == uv.freq_array[0])

    assert 'Flag object with type "baseline"' in uvf.history
    assert pyuvdata_version_str in uvf.history
def test_init_uvcal():
    """A UVFlag built from UVCal is antenna-type and mirrors cal metadata."""
    uvc = UVCal()
    uvc.read_calfits(test_c_file)
    uvf = UVFlag(uvc)

    assert uvf.metric_array.shape == uvc.flag_array.shape
    assert not np.any(uvf.metric_array)
    assert uvf.weights_array.shape == uvc.flag_array.shape
    assert np.all(uvf.weights_array == 1)

    assert uvf.type == "antenna"
    assert uvf.mode == "metric"

    assert np.all(uvf.time_array == uvc.time_array)
    assert uvf.x_orientation == uvc.x_orientation
    assert np.all(uvf.lst_array == lst_from_uv(uvc))
    assert np.all(uvf.freq_array == uvc.freq_array[0])
    assert np.all(uvf.polarization_array == uvc.jones_array)
    assert np.all(uvf.ant_array == uvc.ant_array)

    assert 'Flag object with type "antenna"' in uvf.history
    assert pyuvdata_version_str in uvf.history
    assert uvf.filename == uvc.filename
def test_init_uvcal_mode_flag():
    """UVFlag(UVCal, mode='flag') copies the cal flags verbatim."""
    uvc = UVCal()
    uvc.read_calfits(test_c_file)
    uvf = UVFlag(uvc, copy_flags=False, mode="flag")

    assert hasattr(uvf, "metric_array")
    assert uvf.metric_array is None
    assert uvf.weights_array is None
    assert np.array_equal(uvf.flag_array, uvc.flag_array)

    assert uvf.type == "antenna"
    assert uvf.mode == "flag"

    assert np.all(uvf.time_array == uvc.time_array)
    assert np.all(uvf.lst_array == lst_from_uv(uvc))
    assert np.all(uvf.freq_array == uvc.freq_array[0])
    assert np.all(uvf.polarization_array == uvc.jones_array)
    assert np.all(uvf.ant_array == uvc.ant_array)

    assert 'Flag object with type "antenna"' in uvf.history
    assert pyuvdata_version_str in uvf.history
def test_init_cal_copy_flags():
    """copy_flags=True on a UVCal input warns and switches to flag mode."""
    cal = UVCal()
    cal.read_calfits(test_c_file)
    with uvtest.check_warnings(UserWarning, 'Copying flags to type=="antenna"'):
        uvf = UVFlag(cal, copy_flags=True, mode="metric")

    # in flag mode the metric array must exist but stay unset
    assert hasattr(uvf, "metric_array")
    assert uvf.metric_array is None
    assert np.array_equal(uvf.flag_array, cal.flag_array)

    assert uvf.type == "antenna"
    assert uvf.mode == "flag"

    assert np.all(uvf.time_array == np.unique(cal.time_array))
    assert np.all(uvf.freq_array == cal.freq_array[0])
    assert np.all(uvf.polarization_array == cal.jones_array)
    assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_waterfall_uvd(uvdata_obj, future_shapes):
    """waterfall=True collapses a UVData input to (Ntimes, Nfreqs, Npols)."""
    uv = uvdata_obj
    if future_shapes:
        uv.use_future_array_shapes()
    uvf = UVFlag(uv, waterfall=True)

    waterfall_shape = (uv.Ntimes, uv.Nfreqs, uv.Npols)
    assert uvf.metric_array.shape == waterfall_shape
    assert not np.any(uvf.metric_array)
    assert uvf.weights_array.shape == waterfall_shape
    assert np.all(uvf.weights_array == 1)

    assert uvf.type == "waterfall"
    assert uvf.mode == "metric"

    assert np.all(uvf.time_array == np.unique(uv.time_array))
    assert np.all(uvf.lst_array == np.unique(uv.lst_array))
    if future_shapes:
        assert np.all(uvf.freq_array == uv.freq_array)
    else:
        assert np.all(uvf.freq_array == uv.freq_array[0])
    assert np.all(uvf.polarization_array == uv.polarization_array)

    assert 'Flag object with type "waterfall"' in uvf.history
    assert pyuvdata_version_str in uvf.history
def test_init_waterfall_uvc():
    """waterfall=True collapses a UVCal input to (Ntimes, Nfreqs, Njones)."""
    cal = UVCal()
    cal.read_calfits(test_c_file)
    uvf = UVFlag(cal, waterfall=True, history="input history check")

    waterfall_shape = (cal.Ntimes, cal.Nfreqs, cal.Njones)
    assert uvf.metric_array.shape == waterfall_shape
    assert not np.any(uvf.metric_array)
    assert uvf.weights_array.shape == waterfall_shape
    assert np.all(uvf.weights_array == 1)

    assert uvf.type == "waterfall"
    assert uvf.mode == "metric"

    assert np.all(uvf.time_array == np.unique(cal.time_array))
    assert np.all(uvf.freq_array == cal.freq_array[0])
    assert np.all(uvf.polarization_array == cal.jones_array)

    assert 'Flag object with type "waterfall"' in uvf.history
    assert "input history check" in uvf.history
    assert pyuvdata_version_str in uvf.history
def test_init_waterfall_flag_uvcal():
    """A UVFlag built from UVCal in flag mode has an all-False flag waterfall."""
    cal = UVCal()
    cal.read_calfits(test_c_file)

    flag_obj = UVFlag(cal, waterfall=True, mode="flag")

    # flag array replaces the metric array; weights are unused in flag mode
    assert flag_obj.flag_array.shape == (cal.Ntimes, cal.Nfreqs, cal.Njones)
    assert not np.any(flag_obj.flag_array)
    assert flag_obj.weights_array is None

    assert flag_obj.type == "waterfall"
    assert flag_obj.mode == "flag"

    # axis metadata is carried over from the UVCal object
    assert np.all(flag_obj.time_array == np.unique(cal.time_array))
    assert np.all(flag_obj.freq_array == cal.freq_array[0])
    assert np.all(flag_obj.polarization_array == cal.jones_array)

    assert 'Flag object with type "waterfall"' in flag_obj.history
    assert pyuvdata_version_str in flag_obj.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_waterfall_flag_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, | |
``X``. Next calls the
``draw`` method of the Yellowbrick visualizer, finally returning a new array of
transformed features of shape ``(len(X), projection)``.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
Returns
-------
Xp : ndarray or DataFrame of shape n x m
Returns a new array-like object of transformed features of shape
``(len(X), projection)``.
"""
try:
Xp = self.pca_transformer.transform(X)
self.draw(Xp, y)
return Xp
except NotFittedError:
raise NotFitted.from_estimator(self, "transform")
def draw(self, Xp, y):
"""
Plots a scatterplot of points that represented the decomposition,
`pca_features_`, of the original features, `X`, projected into either 2 or
3 dimensions.
If 2 dimensions are selected, a colorbar and heatmap can also be optionally
included to show the magnitude of each feature value to the component.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
# Call to super draw which draws the scatter plot.
super(PCA, self).draw(Xp, y)
if self.proj_features:
# Draws projection features in transformed space.
self._draw_projection_features(Xp, y)
if self.projection == 2:
if self.heatmap:
if not self.colormap:
self.colormap = palettes.DEFAULT_SEQUENCE
# TODO: change to pcolormesh instead of imshow per #615 spec
im = self.lax.imshow(
self.pca_components_,
interpolation="none",
cmap=self.colormap,
aspect="auto",
)
plt.colorbar(
im,
cax=self.uax,
orientation="horizontal",
ticks=[self.pca_components_.min(), 0, self.pca_components_.max()],
)
return self.ax
def _draw_projection_features(self, Xp, y):
"""
Draw the projection of features in the transformed space.
Parameters
----------
Xp : array-like of shape (n, 2) or (n, 3)
The matrix produced by the ``transform()`` method.
y : array-like of shape (n,), optional
The target, used to specify the colors of the points.
Returns
-------
self.ax : matplotlib Axes object
Returns the axes that the scatter plot was drawn on.
"""
x_vector = self.pca_components_[0]
y_vector = self.pca_components_[1]
max_x = max(Xp[:, 0])
max_y = max(Xp[:, 1])
if self.projection == 2:
for i in range(self.pca_components_.shape[1]):
self.ax.arrow(
x=0,
y=0,
dx=x_vector[i] * max_x,
dy=y_vector[i] * max_y,
color="r",
head_width=0.05,
width=0.005,
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
self.features_[i],
color="r",
)
elif self.projection == 3:
z_vector = self.pca_components_[2]
max_z = max(Xp[:, 1])
for i in range(self.pca_components_.shape[1]):
self.ax.plot(
[0, x_vector[i] * max_x],
[0, y_vector[i] * max_y],
[0, z_vector[i] * max_z],
color="r",
)
self.ax.text(
x_vector[i] * max_x * 1.05,
y_vector[i] * max_y * 1.05,
z_vector[i] * max_z * 1.05,
self.features_[i],
color="r",
)
else:
raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
return self.ax
def finalize(self, **kwargs):
"""
Draws the title, labels, legends, heatmap, and colorbar as specified by the
keyword arguments.
"""
super(PCA, self).finalize()
self.ax.set_title("Principal Component Plot")
self.ax.set_xlabel("$PC_1$")
self.ax.set_ylabel("$PC_2$")
if self.projection == 3:
self.ax.set_zlabel("$PC_3$")
if self.heatmap == True:
self.lax.set_xticks(np.arange(-0.5, len(self.features_)))
self.lax.set_xticklabels([])
# Makes the labels centered.
self.lax.set_xticks(np.arange(0, len(self.features_)), minor=True)
self.lax.set_xticklabels(
self.features_, rotation=90, fontsize=12, minor=True
)
self.lax.set_yticks(np.arange(0.5, 2))
self.lax.set_yticklabels(["$PC_1$", "$PC_2$"], va="bottom", fontsize=10)
self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def pca_decomposition(
X,
y=None,
ax=None,
features=None,
classes=None,
scale=True,
projection=2,
proj_features=False,
colors=None,
colormap=None,
alpha=0.75,
random_state=None,
colorbar=True,
heatmap=False,
show=True,
**kwargs
):
"""
Produce a two or three dimensional principal component plot of the data array ``X``
projected onto its largest sequential principal components. It is common practice
to scale the data array ``X`` before applying a PC decomposition. Variable scaling
can be controlled using the ``scale`` argument.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features.
y : ndarray or Series of length n
An array or series of target or class values.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in, the current axes
will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
This length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
scale : bool, default: True
Boolean that indicates if user wants to scale data.
projection : int or string, default: 2
The number of axes to project into, either 2d or 3d. To plot 3d plots
with matplotlib, please ensure a 3d axes is passed to the visualizer,
otherwise one will be created using the current figure.
proj_features : bool, default: False
Boolean that indicates if the user wants to project the features
in the projected space. If True the plot will be similar to a biplot.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
alpha : float, default: 0.75
Specify a transparency where 1 is completely opaque and 0 is completely
transparent. This property makes densely clustered points more visible.
random_state : int, RandomState instance or None, optional (default None)
This parameter sets the random state on this solver. If the input X is
larger than 500x500 and the number of components to extract is lower
than 80% of the smallest dimension of the data, then the more efficient
`randomized` solver is enabled.
colorbar : bool, default: True
If the target_type is "continous" draw a colorbar to the right of the
scatter plot. The colobar axes is accessible using the cax property.
heatmap : bool, default: False
Add a heatmap showing contribution of each feature in the principal components.
Also draws a colorbar for readability purpose. The heatmap is accessible
using lax property and colorbar using uax property.
show : bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
pca_components_ : ndarray, shape (n_features, n_components)
This tells about the magnitude of each feature in the pricipal components.
This is primarily used to draw the biplots.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
range_ : (min y, max | |
#!/usr/bin/env python3
# tab-width:4
# pylint: disable=missing-docstring
# MIT License
# https://github.com/jakeogh/dnsgate/blob/master/LICENSE
#
# "psl domain" is "Public Second Level domain"
# extracted using https://publicsuffix.org/
# essentially this is the first level at which
# the public could register domains for a given TLD.
# flake8: noqa # flake8 has no per file settings :(
# pylint: disable=C0111 # docstrings are always outdated and wrong
# pylint: disable=C0114 # Missing module docstring (missing-module-docstring)
# pylint: disable=W0511 # todo is encouraged
# pylint: disable=C0301 # line too long
# pylint: disable=R0902 # too many instance attributes
# pylint: disable=C0302 # too many lines in module
# pylint: disable=C0103 # single letter var names, func name too descriptive
# pylint: disable=R0911 # too many return statements
# pylint: disable=R0912 # too many branches
# pylint: disable=R0915 # too many statements
# pylint: disable=R0913 # too many arguments
# pylint: disable=R1702 # too many nested blocks
# pylint: disable=R0914 # too many local variables
# pylint: disable=R0903 # too few public methods
# pylint: disable=E1101 # no member for base
# pylint: disable=W0201 # attribute defined outside __init__
# pylint: disable=R0916 # Too many boolean expressions in if statement
# pylint: disable=C0305 # Trailing newlines editor should fix automatically, pointless warning
# pylint: disable=C0413 # TEMP isort issue [wrong-import-position] Import "from pathlib import Path" should be placed at the top of the module [C0413]
__version__ = "0.0.1"
import ast
import configparser
import copy
import os
import shutil
import sys
import time
from pathlib import Path
from typing import Optional
import click
from logtool import LOG
from logtool import leprint
from logtool import set_verbose
from pathtool import backup_file_if_exists
from pathtool import comment_out_line_in_file
from pathtool import create_relative_symlink
from pathtool import file_exists_nonzero
from pathtool import is_broken_symlink
from pathtool import is_unbroken_symlink
from pathtool import uncomment_line_in_file
from pathtool import write_line_to_file
from stringtool import contains_whitespace
from urltool import extract_domain_set_from_dnsgate_format_file
from urltool import extract_psl_domain
from urltool import group_by_tld
from urltool import prune_redundant_rules
from urltool import strip_to_psl
from urltool import validate_domain_list
from .cache import get_domains_from_url
from .config import DnsgateConfig
from .config import dnsmasq_config_file_line
from .file_headers import make_custom_blacklist_header
from .file_headers import make_custom_whitelist_header
from .file_headers import make_output_file_header
from .global_vars import CACHE_DIRECTORY
from .global_vars import CACHE_EXPIRE
from .global_vars import CONFIG_DIRECTORY
from .global_vars import CONFIG_FILE
from .global_vars import CUSTOM_BLACKLIST
from .global_vars import CUSTOM_WHITELIST
from .global_vars import DEFAULT_REMOTE_BLACKLISTS
from .global_vars import DNSMASQ_CONFIG_FILE
from .global_vars import DNSMASQ_CONFIG_INCLUDE_DIRECTORY
from .global_vars import DNSMASQ_CONFIG_SYMLINK
from .global_vars import OUTPUT_FILE_PATH
from .help import BACKUP_HELP
from .help import BLACKLIST_HELP
from .help import BLOCK_AT_PSL_HELP
from .help import BLOCKALL_HELP
from .help import CACHE_EXPIRE_HELP
from .help import CONFIGURE_HELP
from .help import DEST_IP_HELP
from .help import DISABLE_HELP
from .help import DNSMASQ_CONFIG_HELP
from .help import ENABLE_HELP
from .help import GENERATE_HELP
from .help import INSTALL_HELP_HELP
from .help import NO_CACHE_HELP
from .help import NO_RESTART_DNSMASQ_HELP
from .help import OUTPUT_FILE_HELP
from .help import VERBOSE_HELP
from .help import WHITELIST_HELP
from .help import dnsmasq_install_help
from .help import hosts_install_help
# todo, check return code, run disable() and try again if the service fails
def restart_dnsmasq_service():
    """Restart dnsmasq, preferring the OpenRC init script when it exists."""
    if os.path.lexists('/etc/init.d/dnsmasq'):
        command = '/etc/init.d/dnsmasq restart 1>&2'
    else:
        command = 'systemctl restart dnsmasq 1>&2'  # untested
    os.system(command)
    return True
def append_to_local_rule_file(*,
                              path: Path,
                              idn: str,
                              verbose: bool,
                              debug: bool,
                              ) -> None:
    """Append a single domain to a local rule file, IDNA-encoding it first."""
    leprint("attempting to append %s to %s", idn, path, level=LOG['INFO'])
    # Internationalized names are stored in their ASCII (punycode) form.
    ascii_hostname = idn.encode('idna').decode('ascii')
    leprint("appending hostname: %s to %s", ascii_hostname, path, level=LOG['DEBUG'])
    write_line_to_file(line=ascii_hostname + '\n', path=path, verbose=verbose, debug=debug,)
# https://github.com/mitsuhiko/click/issues/441
CONTEXT_SETTINGS = \
    dict(help_option_names=['--help'],
         terminal_width=shutil.get_terminal_size((80, 20)).columns)


@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('--no-restart-dnsmasq',
              is_flag=True,
              help=NO_RESTART_DNSMASQ_HELP,)
@click.option('--backup',
              is_flag=True,
              help=BACKUP_HELP,)
@click.option('--verbose',
              is_flag=True,
              help=VERBOSE_HELP,
              callback=set_verbose,
              expose_value=False,)
@click.pass_context
def dnsgate(ctx, no_restart_dnsmasq, backup):
    """
    dnsgate combines, deduplicates, and optionally modifies local and
    remote DNS blacklists. Use \"dnsgate (command) --help\"
    for more information.
    """
    config = configparser.ConfigParser()
    # The `configure` subcommand must be able to run before a config file
    # exists, so skip loading/validating the config when it is being invoked.
    if 'dnsgate configure' not in ' '.join(sys.argv):
        if 'dnsgate.py configure' not in ' '.join(sys.argv):
            try:
                with open(CONFIG_FILE, 'r') as cf:
                    config.read_file(cf)
            except FileNotFoundError:
                leprint("No configuration file found, run " + "\"dnsgate configure --help\". Exiting.", level=LOG['ERROR'])
                sys.exit(1)
            mode = config['DEFAULT']['mode']
            try:
                output_path = config['DEFAULT']['output']
            except KeyError:
                leprint('ERROR: ' + CONFIG_FILE.as_posix() + ' has no "output" defined. ' + "run 'dnsgate configure --help' to fix. Exiting.", level=LOG['ERROR'])
                sys.exit(1)
            assert isinstance(output_path, str)
            if not os.path.exists(os.path.dirname(output_path)):
                # BUG FIX: this message previously claimed dnsmasq_config_file
                # was not set (copy-pasted from the dnsmasq branch below); the
                # actual failure here is a missing output directory.
                leprint("ERROR: the directory for 'output' (" + os.path.dirname(output_path) + ") configured in " + CONFIG_FILE.as_posix() + " does not exist. " + "run 'dnsgate configure --help' to fix. Exiting.", level=LOG['ERROR'])
                sys.exit(1)
            block_at_psl = config['DEFAULT'].getboolean('block_at_psl')
            dest_ip = config['DEFAULT']['dest_ip']  # todo validate ip or False/None
            if dest_ip == 'False':
                dest_ip = None
            sources = ast.literal_eval(config['DEFAULT']['sources'])  # configparser has no .getlist()?
            if mode == 'dnsmasq':
                try:
                    # open and close immediately: proves it exists and is writeable
                    dnsmasq_config_file = \
                        click.open_file(config['DEFAULT']['dnsmasq_config_file'],
                                        'w', atomic=True, lazy=True)
                    dnsmasq_config_file.close()  # it exists and is writeable
                except KeyError:
                    leprint("ERROR: dnsgate is configured for 'mode = dnsmasq' in " + CONFIG_FILE.as_posix() + " but dnsmasq_config_file is not set. run 'dnsgate configure --help' to fix. Exiting.", level=LOG['ERROR'])
                    sys.exit(1)
                ctx.obj = DnsgateConfig(mode=mode,
                                        block_at_psl=block_at_psl,
                                        dest_ip=dest_ip,
                                        no_restart_dnsmasq=no_restart_dnsmasq,
                                        dnsmasq_config_file=dnsmasq_config_file,
                                        backup=backup,
                                        sources=sources,
                                        output=output_path,)
            else:
                # hosts mode: default the destination IP when unset
                if not dest_ip:
                    dest_ip = '0.0.0.0'
                ctx.obj = DnsgateConfig(mode=mode,
                                        block_at_psl=block_at_psl,
                                        dest_ip=dest_ip,
                                        no_restart_dnsmasq=no_restart_dnsmasq,
                                        backup=backup,
                                        sources=sources,
                                        output=output_path,)
    os.makedirs(CACHE_DIRECTORY, exist_ok=True)
@dnsgate.command(help=WHITELIST_HELP)
@click.argument('domains', required=True, nargs=-1)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
def whitelist(domains: Optional[tuple],
              verbose: bool,
              debug: bool,
              ) -> None:
    """Append each given domain to the custom whitelist, then regenerate."""
    # iterating an empty tuple is a no-op, so no explicit emptiness guard needed
    for domain in domains or ():
        append_to_local_rule_file(path=CUSTOM_WHITELIST, idn=domain, verbose=verbose, debug=debug,)
    click.get_current_context().invoke(generate)
@dnsgate.command(help=BLACKLIST_HELP)
@click.argument('domains', required=True, nargs=-1)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
def blacklist(domains: Optional[tuple],
              verbose: bool,
              debug: bool,
              ) -> None:
    """Append each given domain to the custom blacklist, then regenerate."""
    # iterating an empty tuple is a no-op, so no explicit emptiness guard needed
    for domain in domains or ():
        append_to_local_rule_file(path=CUSTOM_BLACKLIST, idn=domain, verbose=verbose, debug=debug,)
    click.get_current_context().invoke(generate)
@dnsgate.command(help=INSTALL_HELP_HELP)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
@click.pass_obj
def install_help(config,
                 verbose: bool,
                 debug: bool,
                 ) -> None:
    """Print installation instructions for the configured mode."""
    mode = config.mode
    if mode == 'dnsmasq':
        dnsmasq_install_help(dnsmasq_config_file=DNSMASQ_CONFIG_FILE, output_file=OUTPUT_FILE_PATH)
    elif mode == 'hosts':
        hosts_install_help()
@dnsgate.command(help=ENABLE_HELP)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
@click.pass_obj
def enable(config,
           verbose: bool,
           debug: bool,
           ) -> None:
    # Turn blocking on: uncomment (or add) the include line in the dnsmasq
    # config, point the config symlink at the generated output file, and
    # restart the dnsmasq service. Only supported in dnsmasq mode.
    if config.mode == 'dnsmasq':
        if not file_exists_nonzero(OUTPUT_FILE_PATH):
            leprint('ERROR: %s does not exist, run "dnsgate generate" to fix. Exiting.', OUTPUT_FILE_PATH, level=LOG['ERROR'])
            sys.exit(1)
        # verify generate() was last run in dnsmasq mode so dnsmasq does not
        # fail when the service is restarted
        with open(OUTPUT_FILE_PATH, 'r') as fh:
            file_content = fh.read(550) #just check the header
            if 'mode: dnsmasq' not in file_content:
                leprint('ERROR: %s was not generated in dnsmasq mode, run "dnsgate generate --help" to fix. Exiting.', OUTPUT_FILE_PATH, level=LOG['ERROR'])
                sys.exit(1)
        dnsmasq_config_line = dnsmasq_config_file_line()
        # Re-enable a previously commented-out include line if present;
        # otherwise append it (unique=True prevents duplicate lines).
        if not uncomment_line_in_file(path=config.dnsmasq_config_file, line=dnsmasq_config_line, verbose=verbose, debug=debug,):
            write_line_to_file(line=dnsmasq_config_line, path=config.dnsmasq_config_file.name, unique=True, verbose=verbose, debug=debug,)
        config.dnsmasq_config_file.close()
        symlink = DNSMASQ_CONFIG_SYMLINK
        # A regular file at the symlink path is unexpected: refuse to clobber it.
        if not os.path.islink(symlink): # not a symlink
            if os.path.exists(symlink): # but exists
                leprint("ERROR: " + symlink.as_posix() + " exists and is not a symlink. You need to manually delete it. Exiting.", level=LOG['ERROR'])
                sys.exit(1)
        if is_broken_symlink(symlink): #hm, a broken symlink, ok, remove it
            leprint("WARNING: removing broken symlink: %s", symlink, level=LOG['WARNING'])
            os.remove(symlink)
        # At this point anything still at the path is a symlink to the wrong
        # target; remove it so it can be recreated below.
        if not is_unbroken_symlink(symlink):
            try:
                os.remove(symlink) # maybe it was symlink to somewhere else
            except FileNotFoundError:
                pass # that's ok
        create_relative_symlink(target=OUTPUT_FILE_PATH, link_name=symlink, verbose=verbose, debug=debug,)
        restart_dnsmasq_service()
    else:
        leprint("ERROR: enable is only available with --mode dnsmasq. Exiting.", level=LOG['ERROR'])
        sys.exit(1)
@dnsgate.command(help=DISABLE_HELP)
@click.argument('timeout', required=False, type=int)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
@click.pass_context
def disable(ctx,
            timeout: int,
            verbose: bool,
            debug: bool,
            ) -> None:
    '''TIMEOUT: re-enable after n seconds'''
    config = ctx.obj
    if config.mode == 'dnsmasq':
        # Comment out the include line and remove the config symlink.
        comment_out_line_in_file(path=config.dnsmasq_config_file, line=dnsmasq_config_file_line(), verbose=verbose, debug=debug,)
        config.dnsmasq_config_file.close()
        symlink = DNSMASQ_CONFIG_SYMLINK
        if os.path.islink(symlink):
            os.remove(symlink)
        # A regular file at the symlink path is unexpected: refuse to clobber it.
        if not os.path.islink(symlink): # not a symlink
            if os.path.exists(symlink): # but exists
                leprint("ERROR: " + symlink.as_posix() + " exists and is not a symlink. You need to manually delete it. Exiting.", level=LOG['ERROR'])
                sys.exit(1)
        restart_dnsmasq_service()
        # BUG FIX: TIMEOUT is optional (required=False); previously
        # time.sleep(None) raised TypeError when it was omitted. Only
        # sleep and re-enable when a timeout was actually given.
        # (Also fixed "Sleepin" typo in the log message.)
        if timeout is not None:
            leprint("Sleeping %ss:", timeout)
            time.sleep(timeout)
            ctx.invoke(enable)
    else:
        leprint("ERROR: disable is only available with --mode dnsmasq. Exiting.",
                level=LOG['ERROR'])
        sys.exit(1)
@dnsgate.command(help=BLOCKALL_HELP)
@click.option('--verbose', is_flag=True)
@click.option('--debug', is_flag=True)
@click.pass_obj
def blockall(config,
             verbose: bool,
             debug: bool,
             ) -> None:
    """Write an output file whose single rule ('.') blocks every domain."""
    # guard clause: this command only makes sense for dnsmasq output
    if config.mode != 'dnsmasq':
        leprint("ERROR: blockall is only available with --mode dnsmasq. Exiting.",
                level=LOG['ERROR'])
        sys.exit(1)
    write_output_file(config=config, domains_combined=set(['.']), verbose=verbose, debug=debug,)
@click.pass_obj
def write_output_file(*,
                      config,
                      domains_combined: set,
                      verbose: bool,
                      debug: bool,
                      ):
    # Write the combined domain set to config.output in either dnsmasq or
    # hosts format, with a generated header.
    # NOTE(review): the loop calls domain.decode('utf8'), implying the set
    # holds bytes — but blockall() passes set(['.']) of str, which has no
    # .decode; confirm the element type callers actually supply.
    # NOTE(review): @click.pass_obj prepends ctx.obj as a positional arg,
    # yet every parameter here is keyword-only — verify this combination
    # works for the intended call sites.
    config_dict = make_config_dict(config)
    leprint("Writing output file: %s in %s format", config.output, config.mode, level=LOG['INFO'])
    # atomic=True writes to a temp file and renames on close, so a crash
    # mid-write cannot leave a truncated output file.
    with click.open_file(config.output, 'wb', atomic=True, lazy=True) as fh:
        fh.write(make_output_file_header(config_dict))
        for domain in domains_combined:
            if config.mode == 'dnsmasq':
                if config.dest_ip:
                    dnsmasq_line = 'address=/.' + domain.decode('utf8') + '/' + config.dest_ip + '\n'
                else:
                    dnsmasq_line = 'server=/.' + domain.decode('utf8') + '/' '\n' # return NXDOMAIN
                fh.write(dnsmasq_line.encode('utf8'))
            elif config.mode == 'hosts':
                if config.dest_ip:
                    hosts_line = config.dest_ip + ' ' + domain.decode('utf8') + '\n'
                else:
                    hosts_line = '127.0.0.1' + ' ' + domain.decode('utf8') + '\n'
                fh.write(hosts_line.encode('utf8'))
@dnsgate.command(help=CONFIGURE_HELP, short_help='write /etc/dnsgate/config')
@click.argument('sources', nargs=-1)
@click.option('--mode', is_flag=False,
type=click.Choice(['dnsmasq', 'hosts']),
required=True,)
@click.option('--block-at-psl',
is_flag=True,
help=BLOCK_AT_PSL_HELP,)
@click.option('--dest-ip',
is_flag=False,
help=DEST_IP_HELP,
default=None,)
@click.option('--dnsmasq-config-file',
is_flag=False,
help=DNSMASQ_CONFIG_HELP,
type=click.File(mode='w', atomic=True, lazy=True),
default=DNSMASQ_CONFIG_FILE,)
@click.option('--output',
is_flag=False,
help=OUTPUT_FILE_HELP,
default=OUTPUT_FILE_PATH,)
def configure(sources: Optional[tuple[str]],
mode: str,
block_at_psl: bool,
dest_ip: str,
dnsmasq_config_file: Path,
output: Path,
):
if contains_whitespace(dnsmasq_config_file.name):
leprint("ERROR: --dnsmasq-config-file can not contain whitespace. Exiting.",
level=LOG['ERROR'])
sys.exit(1)
if not sources:
sources = DEFAULT_REMOTE_BLACKLISTS
os.makedirs(CONFIG_DIRECTORY, exist_ok=True)
config = configparser.ConfigParser()
config['DEFAULT'] = \
{
'mode': mode,
'block_at_psl': block_at_psl,
'dest_ip': dest_ip,
'sources': sources,
'output': output
}
if mode == 'dnsmasq':
os.makedirs(DNSMASQ_CONFIG_INCLUDE_DIRECTORY, exist_ok=True)
config['DEFAULT']['dnsmasq_config_file'] = dnsmasq_config_file.name
with open(CONFIG_FILE, 'w') as cf:
config.write(cf)
if not os.path.exists(CUSTOM_BLACKLIST):
with open(CUSTOM_BLACKLIST, 'w') as fh: # not 'wb', utf8 is ok
fh.write(make_custom_blacklist_header(CUSTOM_BLACKLIST))
if not os.path.exists(CUSTOM_WHITELIST):
with open(CUSTOM_WHITELIST, | |
dict()
resource_to_mod_issn_nlm[mod] = dict()
filename = base_path + 'dqm_data/RESOURCE_' + mod + '.json'
try:
with open(filename, 'r') as f:
dqm_data = json.load(f)
for entry in dqm_data['data']:
primary_id = entry['primaryId']
values_to_add = []
for field in resource_fields:
if field in entry:
value = simplify_text_keep_digits(entry[field])
values_to_add.append(value)
if 'abbreviationSynonyms' in entry:
for synonym in entry['abbreviationSynonyms']:
value = simplify_text_keep_digits(synonym)
values_to_add.append(value)
for value in values_to_add:
if value in resource_to_mod:
if primary_id not in resource_to_mod[mod][value]:
resource_to_mod[mod][value].append(primary_id)
else:
resource_to_mod[mod][value] = [primary_id]
if 'crossReferences' in entry:
for xref_entry in entry['crossReferences']:
# if re.match(r"^ISSN:[0-9]+", xref_id):
# if entry['primaryId'] == 'FB:FBmultipub_1740':
# logger.info("id %s xref id %s ", entry['primaryId'], xref_entry['id'])
issn_group = re.search(r"^ISSN:(.+)$", xref_entry['id'])
if issn_group is not None:
issn = issn_group[1]
issn = simplify_text_keep_digits(issn)
# if entry['primaryId'] == 'FB:FBmultipub_1740':
# logger.info("id %s xref id %s issn %s", entry['primaryId'], xref_entry['id'], issn)
if issn in resource_to_nlm:
# if entry['primaryId'] == 'FB:FBmultipub_1740':
# logger.info("id %s xref id %s issn %s nlm %s", entry['primaryId'], xref_entry['id'], issn, resource_to_nlm[issn])
if len(resource_to_nlm[issn]) == 1:
for value in values_to_add:
resource_to_mod_issn_nlm[mod][value] = resource_to_nlm[issn][0]
# if entry['primaryId'] == 'FB:FBmultipub_1740':
# logger.info("id %s xref id %s issn %s nlm %s value %s nlm %s mod %s", entry['primaryId'], xref_entry['id'], issn, resource_to_nlm[issn], value, resource_to_nlm[issn][0], mod)
except IOError as e:
logger.warning(e) # most mods don't have a resource file
return resource_to_mod, resource_to_mod_issn_nlm
def load_pubmed_resource():
    """
    Load pubmed_resource_json/resource_pubmed_all.json and build lookup maps.

    :return: tuple of
        resource_to_nlm - simplified field value -> list of primaryIds
        resource_to_nlm_highest - simplified field value -> numerically highest nlm seen
        resource_nlm_to_title - nlm id -> journal title
    """
    resource_data = dict()
    filename = base_path + 'pubmed_resource_json/resource_pubmed_all.json'
    try:
        # context manager guarantees the handle is closed even if json.load
        # raises (previously a bare open()/close() pair could leak it)
        with open(filename) as f:
            resource_data = json.load(f)
    except IOError:
        logger.info("No resource_pubmed_all.json file at %s", filename)
    resource_to_nlm = dict()
    resource_to_nlm_highest = dict()
    resource_nlm_to_title = dict()
    resource_fields = ['primaryId', 'nlm', 'title', 'isoAbbreviation', 'medlineAbbreviation', 'printISSN', 'onlineISSN']
    # ZFIN does not have ISSN in crossReferences, and may have already fixed them for 4.1.0
    for entry in resource_data:
        # NOTE(review): assumes every entry carries 'primaryId', 'nlm' and
        # 'title'; a missing key would raise KeyError here — confirm the
        # generator of this file guarantees these fields.
        primary_id = entry['primaryId']
        nlm = entry['nlm']
        title = entry['title']
        resource_nlm_to_title[nlm] = title
        for field in resource_fields:
            if field in entry:
                value = simplify_text_keep_digits(entry[field])
                if value in resource_to_nlm:
                    if primary_id not in resource_to_nlm[value]:
                        resource_to_nlm[value].append(primary_id)
                        # for ambiguous values keep the numerically highest nlm
                        if strip_string_to_integer(nlm) > strip_string_to_integer(resource_to_nlm_highest[value]):
                            resource_to_nlm_highest[value] = nlm
                else:
                    resource_to_nlm[value] = [primary_id]
                    resource_to_nlm_highest[value] = nlm
    return resource_to_nlm, resource_to_nlm_highest, resource_nlm_to_title
def strip_string_to_integer(string):
    """
    Drop every non-digit character and return the remaining digits as an int.

    :param string: any string containing at least one digit
    :return: int built from the digits of *string*, in order
    """
    digits = [character for character in string if character.isdigit()]
    return int(''.join(digits))
def load_pmid_multi_mods():
    """
    Read the tab-separated 'pmids_by_mods' file and return the PMIDs that are
    referenced by more than one mod.

    :return: dict mapping pmid (str) -> mod count (raw string column,
             including any trailing newline, as stored in the file)
    """
    pmid_multi_mods = dict()
    pmid_multi_mods_file = base_path + 'pmids_by_mods'
    with open(pmid_multi_mods_file, 'r') as f:
        for line in f:
            cols = line.split("\t")
            # only keep pmids claimed by more than one mod
            if int(cols[1]) > 1:
                pmid_multi_mods[cols[0]] = cols[1]
    # FIX: removed redundant f.close(); the with-block already closed the file
    return pmid_multi_mods
def aggregate_dqm_with_pubmed(input_path, input_mod, output_directory): # noqa: C901
# reads agr_schemas's reference.json to check for dqm data that's not accounted for there.
# outputs sanitized json to sanitized_reference_json/
# does checks on dqm crossReferences. if primaryId is not PMID, and a crossReference is PubMed,
# assigns PMID to primaryId and to authors's referenceId.
# if any reference's author doesn't have author Rank, assign authorRank based on array order.
cross_ref_no_pages_ok_fields = ['DOI', 'PMID', 'PMC', 'PMCID', 'ISBN']
pmid_fields = ['authors', 'volume', 'title', 'pages', 'issueName', 'issueDate', 'datePublished', 'dateArrivedInPubmed', 'dateLastModified', 'abstract', 'pubMedType', 'publisher', 'meshTerms', 'plainLanguageAbstract', 'pubmedAbstractLanguages', 'publicationStatus']
# single_value_fields = ['volume', 'title', 'pages', 'issueName', 'issueDate', 'datePublished', 'dateArrivedInPubmed', 'dateLastModified', 'abstract', 'pubMedType', 'publisher']
single_value_fields = ['volume', 'title', 'pages', 'issueName', 'issueDate', 'datePublished', 'dateArrivedInPubmed', 'dateLastModified', 'abstract', 'publisher', 'plainLanguageAbstract', 'pubmedAbstractLanguages', 'publicationStatus']
replace_value_fields = ['authors', 'pubMedType', 'meshTerms']
# date_fields = ['issueDate', 'datePublished', 'dateArrivedInPubmed', 'dateLastModified']
# datePublished is a string, not a proper date field
date_fields = ['issueDate', 'dateArrivedInPubmed', 'dateLastModified']
compare_if_dqm_empty = False # do dqm vs pmid comparison even if dqm has no data, by default skip
# mods = ['SGD', 'RGD', 'FB', 'WB', 'MGI', 'ZFIN']
# RGD should be first in mods list. if conflicting allianceCategories the later mod gets priority
# mods = ['RGD', 'SGD', 'FB', 'MGI', 'ZFIN', 'WB']
mods = ['RGD', 'MGI', 'SGD', 'FB', 'ZFIN', 'WB']
if input_mod in mods:
mods = [input_mod]
# this has to be loaded, if the mod data is hashed by pmid+mod and sorted for those with
# multiple mods, there's an out-of-memory crash
pmid_multi_mods = load_pmid_multi_mods()
# use these two lines to properly load resource data, but it takes a bit of time
resource_to_nlm, resource_to_nlm_highest, resource_nlm_to_title = load_pubmed_resource()
resource_to_mod, resource_to_mod_issn_nlm = load_mod_resource(mods, resource_to_nlm)
# use these six lines to more quickly test other things that don't need resource data
# resource_to_nlm = dict()
# resource_to_nlm_highest = dict()
# resource_nlm_to_title = dict()
# resource_to_mod = dict()
# for mod in mods:
# resource_to_mod[mod] = dict()
expected_cross_reference_type, exclude_cross_reference_type, pubmed_not_dqm_cross_reference_type = populate_expected_cross_reference_type()
resource_not_found = dict()
cross_reference_types = dict()
json_storage_path = base_path + output_directory + 'sanitized_reference_json/'
if not path.exists(json_storage_path):
makedirs(json_storage_path)
report_file_path = base_path + output_directory + 'report_files/'
if not path.exists(report_file_path):
makedirs(report_file_path)
fh_mod_report = dict()
fh_mod_report_title = dict()
fh_mod_report_differ = dict()
# fh_mod_report_xrefs = dict()
fh_mod_report_resource_unmatched = dict()
fh_mod_report_reference_no_resource = dict()
for mod in mods:
resource_not_found[mod] = dict()
# cross_reference_types[mod] = set()
cross_reference_types[mod] = dict()
filename = report_file_path + mod + '_main'
filename_title = report_file_path + mod + '_dqm_pubmed_differ_title'
filename_differ = report_file_path + mod + '_dqm_pubmed_differ_other'
# filename_xrefs = report_file_path + mod + '_dqm_pubmed_differ_xrefs'
filename_resource_unmatched = report_file_path + mod + '_resource_unmatched'
filename_reference_no_resource = report_file_path + mod + '_reference_no_resource'
fh_mod_report.setdefault(mod, open(filename, 'w'))
fh_mod_report_title.setdefault(mod, open(filename_title, 'w'))
fh_mod_report_differ.setdefault(mod, open(filename_differ, 'w'))
# fh_mod_report_xrefs.setdefault(mod, open(filename_xrefs, 'w'))
fh_mod_report_resource_unmatched.setdefault(mod, open(filename_resource_unmatched, 'w'))
fh_mod_report_reference_no_resource.setdefault(mod, open(filename_reference_no_resource, 'w'))
multi_report_filename = base_path + output_directory + 'report_files/multi_mod'
fh_mod_report.setdefault('multi', open(multi_report_filename, 'w'))
# these are not needed, there are no dqm vs pubmed comparisons for multiple mods
# multi_report_filename_title = base_path + 'report_files/multi_mod_dqm_pubmed_title_differ'
# multi_report_filename_differ = base_path + 'report_files/multi_mod_dqm_pubmed_differ'
# fh_mod_report_title.setdefault('multi', open(multi_report_filename_title, 'w'))
# fh_mod_report_differ.setdefault('multi', open(multi_report_filename_differ, 'w'))
logger.info("Aggregating DQM and PubMed data from %s using mods %s", input_path, mods)
agr_schemas_reference_json_url = 'https://raw.githubusercontent.com/alliance-genome/agr_schemas/master/ingest/resourcesAndReferences/reference.json'
schema_data = dict()
with urllib.request.urlopen(agr_schemas_reference_json_url) as url:
schema_data = json.loads(url.read().decode())
# print(schema_data)
# this has been obsoleted by generating from parse_dqm_json_resource.py but leaving in here until a full run works
# fb have fb ids for resources, but from the resourceAbbreviation and pubmed xml's nlm, we can update
# fb resource data to primary key off of nlm
# fb_resource_abbreviation_to_nlm = dict()
sanitized_pubmed_multi_mod_data = []
unmerged_pubmed_data = dict() # pubmed data by pmid and mod that needs some fields merged
for mod in mods:
filename = input_path + '/REFERENCE_' + mod + '.json'
logger.info("Processing %s", filename)
unexpected_mod_properties = set()
dqm_data = dict()
try:
with open(filename, 'r') as f:
dqm_data = json.load(f)
f.close()
except IOError:
logger.info("No file found for mod %s %s", mod, filename)
continue
entries = dqm_data['data']
sanitized_pubmod_data = []
sanitized_pubmed_single_mod_data = []
for entry in entries:
is_pubmod = True
pmid = None
update_primary_id = False
primary_id = entry['primaryId']
orig_primary_id = entry['primaryId']
# print("primaryId %s" % (entry['primaryId']))
blank_fields = set()
for entry_property in entry:
if entry_property not in schema_data['properties']:
unexpected_mod_properties.add(entry_property)
if entry_property in single_value_fields:
if entry[entry_property] == "":
blank_fields.add(entry_property)
too_many_xref_per_type_failure = False
for entry_field in blank_fields:
del entry[entry_field]
# need to process crossReferences once to reassign primaryId if PMID and filter out
# unexpected crossReferences,
# then again later to clean up crossReferences that get data from pubmed xml (once the PMID is known)
if 'crossReferences' in entry:
expected_cross_references = []
dqm_xrefs = dict()
for cross_reference in entry['crossReferences']:
prefix, identifier, separator = split_identifier(cross_reference["id"])
if prefix not in dqm_xrefs:
dqm_xrefs[prefix] = set()
dqm_xrefs[prefix].add(identifier)
if 'pages' in cross_reference:
if len(cross_reference["pages"]) > 1:
fh_mod_report[mod].write("mod %s primaryId %s has cross reference identifier %s with multiple web pages %s\n" % (mod, primary_id, cross_reference["id"], cross_reference["pages"]))
# logger.info("mod %s primaryId %s has cross reference identifier %s with web pages %s", mod, primary_id, cross_reference["id"], cross_reference["pages"])
else:
if not re.match(r"^PMID:[0-9]+", orig_primary_id):
if cross_reference["pages"][0] == 'PubMed':
xref_id = cross_reference["id"]
if re.match(r"^PMID:[0-9]+", xref_id):
update_primary_id = True
primary_id = xref_id
entry['primaryId'] = xref_id
else:
if prefix not in cross_ref_no_pages_ok_fields:
fh_mod_report[mod].write("mod %s primaryId %s has cross reference identifier %s without web pages\n" % (mod, primary_id, cross_reference["id"]))
| |
# Copyright 2021 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS,
# the U.S. Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for communicating using NNG, see https://pynng.readthedocs.io.
"""
import collections
import contextlib
import functools
import hashlib
import itertools
import logging
import math
import multiprocessing
import numbers
import os
import pickle
import queue
import threading
import time
import traceback
import numpy
import pynng
from .interface import Communicator
import cicada
import cicada.bind
log = logging.getLogger(__name__)
logging.getLogger("pynng.nng").setLevel(logging.INFO)
# Lightweight record describing one inter-player message:
#   serial  - per-sender sequence number (see _send_serial in the communicator)
#   tag     - message category; must be one of NNGCommunicator._tags to be queued
#   sender  - rank of the sending player
#   payload - application data (already unpickled)
Message = collections.namedtuple("Message", ["serial", "tag", "sender", "payload"])
Message.__doc__ = """
Wrapper class for messages sent between processes.
"""
def nng_timeout(value):
    """Convert a timeout in seconds to an NNG timeout in milliseconds.

    `None` maps to -1, which NNG interprets as "no timeout".
    """
    if value is None:
        return -1
    return int(float(value) * 1000.0)
class Timer(object):
    """Tracks wall-clock time elapsed since construction."""
    def __init__(self):
        # Capture the creation time once; elapsed() measures against it.
        self._start = time.time()

    def elapsed(self):
        """Return the number of seconds since this timer was created."""
        now = time.time()
        return now - self._start
class Failed(Exception):
    """Signals that a player process raised an exception."""
    def __init__(self, exception, traceback):
        # Keep both the exception object and its formatted traceback so the
        # controlling process can report what went wrong in the worker.
        self.exception, self.traceback = exception, traceback

    def __repr__(self):
        return f"Failed(exception={self.exception!r})" # pragma: no cover
class Terminated(Exception):
    """Signals that a player process terminated unexpectedly without output."""
    def __init__(self, exitcode):
        # Exit code of the dead process, preserved for diagnostics.
        self.exitcode = exitcode

    def __repr__(self):
        return f"Terminated(exitcode={self.exitcode!r})" # pragma: no cover
class Revoked(Exception):
    """Raised when an operation is attempted after the communicator has been revoked."""
class Timeout(Exception):
    """Raised when a blocking operation exceeds its time limit."""
class TryAgain(Exception):
    """Raised when a non-blocking operation would have had to block."""
class NNGCommunicator(Communicator):
"""Cicada communicator that uses pynng (https://pynng.readthedocs.io) as the transport layer.
Note
----
Creating a communicator is a collective operation that must be called by
all players that will be members.
Parameters
----------
name: string, optional
The name of this communicator, which is used strictly for logging
and debugging. If unspecified the default is "world".
world_size: integer, optional
The number of players that will be members of this communicator.
Defaults to the value of the WORLD_SIZE environment variable.
link_addr: string, optional
NNG address of the root (rank 0) player. This address must be
publically accessible to all of the other players. Defaults to the
value of the LINK_ADDR environment variable.
rank: integer, optional
The rank of the local player, in the range [0, world_size). Defaults
to the value of the RANK environment variable.
host_addr: string, optional
NNG address of the local player. This address must be publically
accessible to all of the other players. Defaults to the value of the
HOST_ADDR environment variable. Note that this value is ignored
by the root player.
timeout: number or `None`
Maximum time to wait for normal communication to complete in seconds, or `None` to disable timeouts.
setup_timeout: number or `None`
Maximum time allowed to setup the communicator in seconds, or `None` to disable timeouts during setup.
"""
    # Every message category used by the communicator's operations.  Incoming
    # messages whose tag is not in this list are dropped by _queue_messages().
    # Note that "revoke" is listed here but is handled out-of-band (it flips
    # the revoked flag instead of being delivered to a receive queue).
    _tags = [
        "allgather",
        "barrier-enter",
        "barrier-exit",
        "broadcast",
        "gather",
        "gatherv",
        "revoke",
        "scatter",
        "scatterv",
        "send",
        "shrink-enter",
        "shrink-exit",
        "split",
        "split-prepare",
    ]

    class _Done(object):
        """Sentinel message used to shut-down the queueing thread."""
        pass
def __init__(self, *, name=None, world_size=None, rank=None, link_addr=None, host_addr=None, token=0, timeout=5, setup_timeout=5):
# Setup defaults.
if name is None:
name = "world"
if world_size is None:
world_size = int(os.environ["WORLD_SIZE"])
if rank is None:
rank = int(os.environ["RANK"])
if link_addr is None:
link_addr = os.environ["LINK_ADDR"]
if host_addr is None:
host_addr = os.environ["HOST_ADDR"]
# Enforce preconditions.
if not isinstance(world_size, int):
raise ValueError("world_size must be an integer.") # pragma: no cover
if not world_size > 0:
raise ValueError("world_size must be an integer greater than zero.") # pragma: no cover
if not isinstance(rank, int):
raise ValueError("rank must be an integer.") # pragma: no cover
if not (0 <= rank and rank < world_size):
raise ValueError(f"rank must be in the range [0, {world_size}).") # pragma: no cover
if not isinstance(link_addr, str):
raise ValueError("link_addr must be an NNG address string.") # pragma: no cover
if not isinstance(host_addr, str):
raise ValueError("host_addr must be an NNG address string.") # pragma: no cover
if rank == 0 and link_addr != host_addr:
raise ValueError(f"link_addr {link_addr} and host_addr {host_addr} must match for rank 0.") # pragma: no cover
# Setup the player's receiving socket.
self._receiver = pynng.Rep0(listen=host_addr, recv_timeout=nng_timeout(setup_timeout))
log.info(f"Player {rank} rendezvous with {link_addr} from {host_addr}.")
# Rank 0 waits for every player to send their address.
if rank == 0:
remaining_ranks = set(range(1, world_size))
addresses = [(rank, host_addr)]
for index in range(1, world_size):
other_rank, other_host_addr, other_token = pickle.loads(self._receiver.recv())
self._receiver.send(b"ok")
if other_token != token:
raise RuntimeError(f"Player {rank} expected token {token}, received {other_token} from player {other_rank}.")
addresses.append((other_rank, other_host_addr))
# Setup sockets for sending to the other players.
addresses = [address for rank, address in sorted(addresses)]
self._players = [pynng.Req0(dial=address) for address in addresses]
# Send addresses back to the other players.
for player in self._players[1:]:
player.send(pickle.dumps(addresses))
player.recv()
# All players send their address to rank 0.
if rank != 0:
with pynng.Req0(dial=link_addr) as link:
link.send(pickle.dumps((rank, host_addr, token)))
link.recv()
addresses = pickle.loads(self._receiver.recv())
self._receiver.send(b"ok")
# Setup sockets for sending to the other players.
self._players = [pynng.Req0(dial=address) for address in addresses]
# We don't want a timeout for the receiving socket.
self._receiver.recv_timeout = nng_timeout(None)
self._name = name
self._world_size = world_size
self._link_addr = link_addr
self._rank = rank
self._host_addr = host_addr
self._timeout = timeout
self._revoked = False
self._send_serial = 0
self._stats = {
"bytes": {
"sent": {
"total": 0,
},
"received": {
"total": 0,
},
},
"messages": {
"sent": {
"total": 0,
},
"received": {
"total": 0,
},
},
}
# Setup queue for outgoing messages.
self._outgoing = queue.Queue()
# Setup queues for incoming messages.
self._incoming = queue.Queue()
self._receive_queues = {}
for tag in self._tags:
if tag not in ["revoke"]:
self._receive_queues[tag] = {}
for rank in self.ranks:
self._receive_queues[tag][rank] = queue.Queue()
# Start sending outgoing messages.
self._outgoing_thread = threading.Thread(name="Outgoing", target=self._send_messages, daemon=True)
self._outgoing_thread.start()
# Start queueing incoming messages.
self._queueing_thread = threading.Thread(name="Queueing", target=self._queue_messages, daemon=True)
self._queueing_thread.start()
# Start receiving incoming messages.
self._receiving_thread = threading.Thread(name="Incoming", target=self._receive_messages, daemon=True)
self._receiving_thread.start()
self._freed = False
log.info(f"Comm {self.name!r} player {self._rank} communicator ready.")
def _queue_messages(self):
# Place incoming messages in the correct queue.
while True:
# Wait for the next incoming message.
message = self._incoming.get(block=True, timeout=None)
# If the communicator has been freed, exit the thread.
if isinstance(message, NNGCommunicator._Done):
return
# Drop messages with missing attributes or unexpected values.
try:
if not hasattr(message, "payload"):
raise RuntimeError(f"Message missing payload.") # pragma: no cover
if not hasattr(message, "sender"):
raise RuntimeError(f"Message missing sender.") # pragma: no cover
if not hasattr(message, "serial"):
raise RuntimeError(f"Message missing serial number.") # pragma: no cover
if not hasattr(message, "tag"):
raise RuntimeError(f"Message missing tag.") # pragma: no cover
if message.tag not in self._tags:
raise RuntimeError(f"Unknown tag: {message.tag}") # pragma: no cover
if message.sender not in self.ranks:
raise RuntimeError(f"Unexpected sender: {message.sender}") # pragma: no cover
except Exception as e: # pragma: no cover
log.error(f"Comm {self.name!r} player {self.rank} dropping unexpected message: {e}")
continue
# Log received messages.
if log.isEnabledFor(logging.DEBUG):
log.debug(f"Comm {self.name!r} player {self.rank} <-- player {message.sender} {message.tag}#{message.serial:04}")
# Revoke messages don't get queued because they receive special handling.
if message.tag == "revoke":
if not self._revoked:
self._revoked = True
log.info(f"Comm {self.name!r} player {self.rank} revoked by player {message.sender}")
continue
# Insert the message into the correct queue.
self._receive_queues[message.tag][message.sender].put(message, block=True, timeout=None)
    def _receive_messages(self):
        """Worker loop: read raw messages from the NNG socket onto the incoming queue.

        Runs on a daemon thread.  Each raw message is acknowledged, counted in
        the statistics, unpickled, and handed to _queue_messages() via the
        incoming queue.  Exits when the receiving socket is closed.
        """
        # Parse and queue incoming messages as they arrive.
        while True:
            try:
                # Wait for a message to arrive from the pynng socket.
                raw_message = self._receiver.recv(block=True)
                # Acknowledge receipt; senders block on this reply (see the
                # matching link.recv()/player.recv() calls during setup).
                self._receiver.send(b"ok")

                # Update statistics.
                self._stats["bytes"]["received"]["total"] += len(raw_message)
                self._stats["messages"]["received"]["total"] += 1

                # Ignore unparsable messages.
                try:
                    message = pickle.loads(raw_message)
                except Exception as e: # pragma: no cover
                    log.error(f"Comm {self.name!r} player {self.rank} ignoring unparsable message: {e}")
                    continue

                # Insert the message into the incoming queue.
                self._incoming.put(message, block=True, timeout=None)
            except pynng.exceptions.Closed:
                # The communicator has been freed, so exit the thread.
                log.debug(f"Comm {self.name!r} player {self.rank} receiving socket closed.")
                break
def _receive(self, *, tag, sender, block):
try:
return self._receive_queues[tag][sender].get(block=block, timeout=self._timeout)
| |
#!/usr/bin/env python
# coding: utf-8
# # Decision Trees (with GINI) for Predictive Maintenance
# Supplementary Jupyter Notebook for the seminar paper of <NAME> and <NAME>.
#
# This Notebook implements the procedure to build a decision-tree based on Gini-index metric.
# It has been setup to build a Decision-Tree for the predictive-maintance example of the lecture.
#
# See Homework 3.2: Calculate the Decision Tree for UseCase “Predictive Maintenance” on slide p.77. Do the following steps:
# 1. Calculate the Frequency Matrices for the features „Temperatur“, „Druck“ and „Füllstand“
# 2. Define the Root-node by calculating the GINI-Index for all values of the three features. Define the optimal split-value for the root-node (see slide p.67)
# 3. Finalize the decision tree by calculation of GINI-Index for the remaining features “Temp.” and “Füllst.”
# Task: Create and describe the algorithms to automate the calculation of the steps 1. to 3.
#
# In[1]:
import platform
import datetime
import numpy as np
import pandas as pd
import graphviz as gv
print(f"This notebook was launched at: {datetime.datetime.now()}")
print()
print("Versions of the used runtime and libraries:")
print(f"- python {platform.python_version()}")
print(f"- pandas {pd.__version__}")
print(f"- numpy {np.__version__}")
print(f"- graphviz {gv.__version__}")
# In[2]:
# The name of the feature that should get predicted
predict_feature = 'Fehler'
# A set of possible values that the feature to predict might have
predict_values = [True, False]
# A set of value-sets on which to base the decision tree.
# NOTE: np.array over rows that mix ints and bools yields an object-dtype
# array, so the boolean 'Fehler' values survive as Python bools in the frame.
data = pd.DataFrame(np.array([
    [244, 140, 4600, False],
    [200, 130, 4300, False],
    [245, 108, 4100, True],
    [250, 112, 4100, False],
    [200, 107, 4200, False],
    [272, 170, 4400, True],
    [265, 105, 4100, False],
    [248, 138, 4800, True],
    [200, 194, 4500, True],
]), columns = ['Temperatur', 'Druck', 'Füllstand', 'Fehler'])
# ## Datastructures and utilities
# Before we start to build the decision-tree, we first define datastructures that can be used later to represent a decision tree.
# Further utility functions are defined to render a decision-tree in a graphical, human-understandable way.
# In[3]:
# A list of all features that will be considered when building the decision tree.
# This is any feature except the feature to predict.
# (With the data above this is ['Temperatur', 'Druck', 'Füllstand'].)
input_features = list(filter(lambda f: f != predict_feature, data.columns))
# A question of a decision tree that defines on which feature at which threshold to divide.
#
# If further distinction between the data is possible, a further question for the values
# below and above the threshold can be provided. Otherwise the tree will contain the
# prediction result for these cases as generated from the `calculate_prediction` function.
class Decision:
    """One internal node of the decision tree.

    The node splits on `feature` at `threshold`; `below` and `above` hold
    the subtree (another Decision) or leaf (a Prediction) for values
    <= threshold and > threshold respectively.
    """
    def __init__(self, feature, threshold, below, above):
        (self.feature, self.threshold,
         self.below, self.above) = (feature, threshold, below, above)
# In[4]:
# Represent a prediction for the "predict_feature" as it
# will be present at each leaves of the decision-tree
class Prediction:
    """A leaf of the decision tree: the probability of each predicted value."""

    def __init__(self, values):
        # Fraction of this leaf's training values equal to each possible class
        # (iterates the module-level `predict_values`).
        total = len(values)
        self.props = {}
        for value in predict_values:
            self.props[value] = values.count(value) / total

    def single_value(self):
        """Return the value predicted with 100% probability, or None if mixed."""
        for value, prop in self.props.items():
            if prop == 1.00:
                return value
        return None

    def multi_label(self):
        """Human-readable percentages (truncated to whole percent) per predicted value."""
        parts = []
        for value, percentage in self.props.items():
            if percentage > 0.0:
                parts.append(f"{int(percentage * 100)}% {value}")
        return ", ".join(parts)
# Example:
# Prints "Prediction: 33% True, 66% False" — 1/3 and 2/3, truncated by int().
print(f"Prediction: {Prediction([True, False, False]).multi_label()}")
# In[5]:
# Visualize a calculated decision-tree using Graphviz
def render_tree(tree):
    """Visualize a calculated decision-tree using Graphviz.

    Decision nodes are labelled with their feature name, their outgoing edges
    with "<= threshold" / "> threshold"; leaves show either a single
    100%-certain value or a weighted multi-value label.

    Returns the graphviz.Digraph for display/rendering.
    """
    i = 0  # next free node id, shared across the recursion via `nonlocal`

    def render_node(dot, tree):
        nonlocal i
        if isinstance(tree, Decision):
            # Render a decision on a feature with its subtrees
            treeId = i
            dot.node(str(treeId), tree.feature)
            i += 1
            dot.edge(str(treeId), str(i), f"<= {tree.threshold}")
            render_node(dot, tree.below)
            dot.edge(str(treeId), str(i), f"> {tree.threshold}")
            render_node(dot, tree.above)
        elif isinstance(tree, Prediction):
            # Render a prediction which can be one single result which
            # has a probability of 100% or multiple weighted values.
            val = tree.single_value()
            # Fixed: compare with `is not None` rather than `!= None` — an
            # identity check is the correct idiom, and it keeps a legitimate
            # single value of False on this branch.
            if val is not None:
                dot.node(str(i), str(val))
            else:
                dot.node(str(i), tree.multi_label())
            i += 1

    dot = gv.Digraph()
    render_node(dot, tree)
    return dot
# ---
# # Gini index
# In the next section we implement the algorithms to calculate the Gini-Index for given value-sets.
# These metrics are later used to make optimal decisions when choosing the questions for the decision-tree.
# ### Calculate the Gini-index
# Calculate the gini-index (also commonly referred to as Gini-impurity) for a set of predicted values as follows:
#
# $ 1 - \sum^{j}_{i=1} {p_i}^{2} $
#
# ${j}$ is the number of `predict_values`, being `len(predict_values)`
# ${p_i}$ is the probability that the `predict_feature` of a random value from the input value-set is equal to `predict_values[i]`
#
# This metric describes how homogeneous a set of values is.
# Datasets with much variation between the values have high index values,
# while a set of very similar values has a low index.
# In[6]:
def gini_index(values, classes=None):
    """Return the Gini impurity of *values*: 1 - sum(p_i ** 2).

    values: iterable of predicted values (list or pandas Series).
    classes: the possible class values to count; defaults to the
        module-level `predict_values` for backward compatibility.

    0.0 means a perfectly homogeneous set; higher values mean more mixing.
    An empty input returns 0.
    """
    if classes is None:
        classes = predict_values
    total = len(values)
    # If there are no values, this early return prevents a division by zero.
    if total == 0:
        return 0
    index = 1
    for predict_value in classes:
        # How many of the values match the predict_value
        count = len(list(filter(lambda val: val == predict_value, values)))
        index -= (count / total) ** 2
    return index
# Examples:
# Maximally mixed 2-class set: 1 - (0.5**2 + 0.5**2) = 0.5
mixed_index = gini_index([False, False, True, True])
print(f"The Gini-index of maximum mixed values is: {mixed_index}")
# Homogeneous set: 1 - 1**2 = 0.0
homogeneous_index = gini_index([True, True])
print(f"The Gini-index of homogeneous values is: {homogeneous_index}")
# ### Calculate the Gini-index for a split on a feature
# Calculate a Gini-index for a split threshold on a feature.
# It is a weighted average of the gini-index for the values below and above a defined threshold.
#
# The split gini-index therefore describes how cleanly a threshold partitions the dataset into two homogeneous subsets.
# A split gini-index of zero is an ideal split: each side of the split contains only one predicted class.
# In[7]:
def split_gini_index(data, feature_name, threshold):
    """Weighted Gini impurity of splitting *data* on *feature_name* at *threshold*.

    The result is the average of the Gini impurity of the predicted values
    below (<=) and above (>) the threshold, weighted by subset size.  Zero
    means both sides of the split are homogeneous.
    """
    # Collect the predicted values on each side of the threshold.
    below = data[data[feature_name] <= threshold][predict_feature]
    above = data[data[feature_name] > threshold][predict_feature]
    total = len(data)
    weighted = 0
    for subset in (below, above):
        weighted += (len(subset) / total) * gini_index(subset)
    return weighted
# Examples:
# Below 210: three False (gini 0); above: two True (gini 0) -> weighted 0.0
idea_threshold = split_gini_index(pd.DataFrame(np.array([
    [200, False],
    [200, False],
    [200, False],
    [244, True],
    [245, True],
]), columns = ['Temperatur', 'Fehler']), 'Temperatur', 210)
print(f"Gini-index of a split that divides the dataset perfectly: {idea_threshold}")
# Below 210: two True (gini 0); above: one True, one False (gini 0.5)
# -> weighted (2/4)*0 + (2/4)*0.5 = 0.25
non_perfect_threshold = split_gini_index(pd.DataFrame(np.array([
    [200, True],
    [200, True],
    [244, True],
    [245, False],
]), columns = ['Temperatur', 'Fehler']), 'Temperatur', 210)
print(f"Gini-index of a non-ideal split: {non_perfect_threshold}")
# ---
# # Choosing the best question to ask
# For each feature we will define a decision rule that partitions the dataset into values above and below a certain threshold.
# To choose an optimum threshold which provides the best separation, we are using the gini-index as metric.
#
# For each feature the optimum threshold to consider when asking a question is the one whose gini-index is the lowest.
#
# Given a dataset, the overall best question to ask is therefore, to partition over the feature whose optimum threshold has the overall lowest gini-index.
# ### Calculate all thresholds which can be considered for a feature.
# We therefore take all mean values between the numbers into account.
#
# Additional thresholds below the smallest and above the largest number have been considered in the lecture.
# They are implemented for completeness but should be unattainable in practice.
#
#
#
# <table style="float: right;">
# <tr>
# <td width="100px" style="text-align: left;">Input-values</td>
# <td width="50px"></td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">200</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">244</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">245</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">248</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">250</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">265</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">272</td>
# <td width="50px"></td>
# </tr>
# <tr>
# <td width="100px" style="text-align: left;">Thresholds</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">178</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">222</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">244,5</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">246,5</td>
# <td colspan="2" width="75px" style="border: black solid 1px; text-align: center;">249</td>
# <td | |
yang_type='oc-yang:counter64', is_config=False)""",
})
self.__connection_failures = t
if hasattr(self, '_set'):
self._set()
    def _unset_connection_failures(self):
        # Reset connection_failures to a fresh default-valued YANGDynClass
        # instance.  Auto-generated by pyangbind; do not edit by hand.
        self.__connection_failures = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-failures", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    def _get_connection_timeouts(self):
        """
        Getter method for connection_timeouts, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_timeouts (oc-yang:counter64)

        YANG Description: Number of connection timeouts to the server
        """
        # The double-underscore attribute is name-mangled to the defining class.
        return self.__connection_timeouts
    def _set_connection_timeouts(self, v, load=False):
        """
        Setter method for connection_timeouts, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/connection_timeouts (oc-yang:counter64)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_connection_timeouts is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_connection_timeouts() directly.

        YANG Description: Number of connection timeouts to the server
        """
        # NOTE(review): _utype presumably coerces v to its underlying union
        # type before validation — confirm against pyangbind's generated API.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Wrap the value in the generated YANG type; incompatible values are
        # reported with the standard pyangbind error structure below.
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-timeouts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """connection_timeouts must be of a type compatible with oc-yang:counter64""",
                'defined-type': "oc-yang:counter64",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-timeouts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
            })
        self.__connection_timeouts = t
        if hasattr(self, '_set'):
            self._set()
    def _unset_connection_timeouts(self):
        # Reset connection_timeouts to a fresh default-valued YANGDynClass
        # instance.  Auto-generated by pyangbind; do not edit by hand.
        self.__connection_timeouts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-timeouts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    def _get_messages_sent(self):
        """
        Getter method for messages_sent, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/messages_sent (oc-yang:counter64)

        YANG Description: Number of messages sent to the server
        """
        # The double-underscore attribute is name-mangled to the defining class.
        return self.__messages_sent
  def _set_messages_sent(self, v, load=False):
    """
    Setter method for messages_sent, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/messages_sent (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_messages_sent is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_messages_sent() directly.
    YANG Description: Number of messages sent to the server
    """
    # Unwrap pyangbind user-type wrappers back to their native value first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as a typed leaf; raises TypeError/ValueError when
      # v cannot be coerced to a uint64 counter in the allowed range.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """messages_sent must be of a type compatible with oc-yang:counter64""",
        'defined-type': "oc-yang:counter64",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
      })
    self.__messages_sent = t
    # Notify the parent/helper hook, if one is installed, that state changed.
    if hasattr(self, '_set'):
      self._set()
def _unset_messages_sent(self):
self.__messages_sent = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
def _get_messages_received(self):
"""
Getter method for messages_received, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/messages_received (oc-yang:counter64)
YANG Description: Number of messages received by the server
"""
return self.__messages_received
  def _set_messages_received(self, v, load=False):
    """
    Setter method for messages_received, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/messages_received (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_messages_received is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_messages_received() directly.
    YANG Description: Number of messages received by the server
    """
    # Unwrap pyangbind user-type wrappers back to their native value first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as a typed leaf; raises TypeError/ValueError when
      # v cannot be coerced to a uint64 counter in the allowed range.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """messages_received must be of a type compatible with oc-yang:counter64""",
        'defined-type': "oc-yang:counter64",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
      })
    self.__messages_received = t
    # Notify the parent/helper hook, if one is installed, that state changed.
    if hasattr(self, '_set'):
      self._set()
def _unset_messages_received(self):
self.__messages_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
def _get_errors_received(self):
"""
Getter method for errors_received, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/errors_received (oc-yang:counter64)
YANG Description: Number of error messages received from the server
"""
return self.__errors_received
  def _set_errors_received(self, v, load=False):
    """
    Setter method for errors_received, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/errors_received (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_errors_received is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_errors_received() directly.
    YANG Description: Number of error messages received from the server
    """
    # Unwrap pyangbind user-type wrappers back to their native value first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as a typed leaf; raises TypeError/ValueError when
      # v cannot be coerced to a uint64 counter in the allowed range.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="errors-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """errors_received must be of a type compatible with oc-yang:counter64""",
        'defined-type': "oc-yang:counter64",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="errors-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)""",
      })
    self.__errors_received = t
    # Notify the parent/helper hook, if one is installed, that state changed.
    if hasattr(self, '_set'):
      self._set()
def _unset_errors_received(self):
self.__errors_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="errors-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
name = __builtin__.property(_get_name)
address = __builtin__.property(_get_address)
timeout = __builtin__.property(_get_timeout)
connection_opens = __builtin__.property(_get_connection_opens)
connection_closes = __builtin__.property(_get_connection_closes)
connection_aborts = __builtin__.property(_get_connection_aborts)
connection_failures = __builtin__.property(_get_connection_failures)
connection_timeouts = __builtin__.property(_get_connection_timeouts)
messages_sent = __builtin__.property(_get_messages_sent)
messages_received = __builtin__.property(_get_messages_received)
errors_received = __builtin__.property(_get_errors_received)
_pyangbind_elements = OrderedDict([('name', name), ('address', address), ('timeout', timeout), ('connection_opens', connection_opens), ('connection_closes', connection_closes), ('connection_aborts', connection_aborts), ('connection_failures', connection_failures), ('connection_timeouts', connection_timeouts), ('messages_sent', messages_sent), ('messages_received', messages_received), ('errors_received', errors_received), ])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-system - based on the path /system/aaa/server-groups/server-group/servers/server/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__address','__timeout','__connection_opens','__connection_closes','__connection_aborts','__connection_failures','__connection_timeouts','__messages_sent','__messages_received','__errors_received',)
_yang_name = 'state'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Construct the state container with default-valued leaves.

    Accepts at most one positional argument: an object exposing the same
    element attributes, whose changed leaves are copied in through the
    corresponding _set_* methods. Recognised keyword arguments are
    'path_helper' and 'load'.
    """
    # Resolve the path helper: an explicit keyword argument wins, then the
    # parent's helper (if any); otherwise path registration is disabled.
    helper = kwargs.pop("path_helper", None)
    if helper is False:
      self._path_helper = False
    elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
      self._path_helper = helper
    elif hasattr(self, "_parent"):
      helper = getattr(self._parent, "_path_helper", False)
      self._path_helper = helper
    else:
      self._path_helper = False
    self._extmethods = False
    # Default-construct every leaf of this container (all config: false).
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)
    # address accepts either an IPv4 or an IPv6 textual representation.
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '^(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))$'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-inet:ip-address', is_config=False)
    self.__timeout = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='uint16', is_config=False)
    self.__connection_opens = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-opens", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__connection_closes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-closes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__connection_aborts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-aborts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__connection_failures = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-failures", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__connection_timeouts = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="connection-timeouts", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__messages_sent = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__messages_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="messages-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    self.__errors_received = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="errors-received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='oc-yang:counter64', is_config=False)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # The supplied source object must expose every element of this
      # container before any copying begins.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      # Copy across only the leaves that differ from their defaults,
      # honouring the optional 'load' flag on each setter call.
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['system', 'aaa', 'server-groups', 'server-group', 'servers', 'server', 'state']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/name (string)
YANG Description: Name assigned to the server
"""
return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.
    YANG Description: Name assigned to the server
    """
    # Unwrap pyangbind user-type wrappers back to their native value first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap the value as a typed string leaf; raises TypeError/ValueError
      # when v is not text-compatible.
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """name must be of a type compatible with string""",
        'defined-type': "string",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)""",
      })
    self.__name = t
    # Notify the parent/helper hook, if one is installed, that state changed.
    if hasattr(self, '_set'):
      self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/system', defining_module='openconfig-system', yang_type='string', is_config=False)
def _get_address(self):
"""
Getter method for address, mapped from YANG variable /system/aaa/server_groups/server_group/servers/server/state/address (oc-inet:ip-address)
YANG | |
<gh_stars>0
#!/usr/bin/env python3
# ctypes-based bindings for an ODBC driver manager; the sections below mirror
# the C headers iodbcunix.h, sqltypes.h and sql.h.
import ctypes
import enum
import os
# Pick the platform's ODBC driver-manager library: odbc32.dll on Windows
# ("nt"), otherwise the unixODBC shared object.
if os.name == "nt":
    DM_ODBC_NAME = "odbc32.dll"
else:
    DM_ODBC_NAME = "libodbc.so"
def UnimplementedSQLFunction(*args, **kwargs):
    """Placeholder for SQL API entry points that are not implemented.

    Accepts any positional and keyword arguments so it can be bound in
    place of any ODBC function signature without raising a spurious
    TypeError (the original only accepted positional arguments).

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError("This SQL function is not implemented")
class Driver:
def __init__(self, odbc_driver_name = DM_ODBC_NAME, size_of_long = 8, unicode = True, legacy = True):
self.ODBC_DRIVER_NAME = odbc_driver_name
self.SIZE_OF_LONG = size_of_long
self.UNICODE = unicode
self.LEGACY = legacy
self.ODBC_DRIVER = ctypes.CDLL(self.ODBC_DRIVER_NAME)
self.UnimplementedSQLFunction = UnimplementedSQLFunction
####----------------------------------------------------------------------------
####This section mimics iodbcunix.h---------------------------------------------
####----------------------------------------------------------------------------
self.TRUE = 1
self.FALSE = 0
################################################################################
####Windows style typedefs######################################################
################################################################################
self.BYTE = ctypes.c_ubyte
self.WORD = ctypes.c_ushort
self.DWORD = ctypes.c_uint
self.LPSTR = ctypes.c_char_p
self.LPCSTR = ctypes.c_char_p
self.LPWSTR = ctypes.c_wchar_p
self.LPCWSTR = ctypes.c_wchar_p
self.LPDWORD = ctypes.POINTER(self.DWORD,)
self.BOOL = ctypes.c_int
####----------------------------------------------------------------------------
####This section mimics sqltypes.h----------------------------------------------
####----------------------------------------------------------------------------
################################################################################
####ODBC Specification##########################################################
################################################################################
self.ODBCVER = 0x0351
################################################################################
####ODBC Types##################################################################
################################################################################
self.SQLCHAR = ctypes.c_ubyte
self.SQLSMALLINT = ctypes.c_short
self.SQLUSMALLINT = ctypes.c_ushort
if self.SIZE_OF_LONG == 8:
self.SQLINTEGER = ctypes.c_int
self.SQLUINTEGER = ctypes.c_uint
else:
self.SQLINTEGER = ctypes.c_long
self.SQLUINTEGER = ctypes.c_ulong
self.SQLPOINTER = ctypes.c_void_p
self.SQLSCHAR = ctypes.c_char
self.SQLDATE = ctypes.c_ubyte
self.SQLDECIMAL = ctypes.c_ubyte
self.SQLNUMERIC = ctypes.c_ubyte
self.SQLDOUBLE = ctypes.c_double
self.SQLFLOAT = ctypes.c_double
self.SQLREAL = ctypes.c_float
self.SQLTIME = ctypes.c_ubyte
self.SQLTIMESTAMP = ctypes.c_ubyte
self.SQLVARCHAR = ctypes.c_ubyte
self.SQLBIGINT = ctypes.c_longlong
self.SQLUBIGINT = ctypes.c_ulonglong
self.SQLWCHAR = ctypes.c_ushort
if self.UNICODE:
self.SQLTCHAR = self.SQLWCHAR
else:
self.SQLTCHAR = self.SQLCHAR
if self.LEGACY:
self.SQLLEN = ctypes.c_int
self.SQLULEN = ctypes.c_uint
self.SQLSETPOSIROW = ctypes.c_ushort
else:
self.SQLLEN = ctypes.c_long
self.SQLULEN = ctypes.c_ulong
self.SQLSETPOSIROW = ctypes.c_ushort
################################################################################
####Backward Compatibility with older platform SDKs#############################
################################################################################
self.SQLROWCOUNT = self.SQLULEN
self.SQLROWSETSIZE = self.SQLULEN
self.SQLTRANSID = self.SQLULEN
self.SQLROWOFFSET = self.SQLLEN
################################################################################
####Generic Pointer Types#######################################################
################################################################################
self.PTR = ctypes.c_void_p
self.SQLHANDLE = ctypes.c_void_p
################################################################################
####Handles#####################################################################
################################################################################
self.HENV = ctypes.c_void_p
self.HDBC = ctypes.c_void_p
self.HSTMT = ctypes.c_void_p
self.SQLHENV = self.SQLHANDLE
self.SQLHDBC = self.SQLHANDLE
self.SQLHSTMT = self.SQLHANDLE
self.SQLHDESC = self.SQLHANDLE
self.HWND = self.SQLPOINTER
self.SQLHWND = self.SQLPOINTER
################################################################################
####Portable Types##############################################################
################################################################################
self.UCHAR = ctypes.c_ubyte
self.SCHAR = ctypes.c_char
self.SWORD = ctypes.c_short
self.UWORD = ctypes.c_ushort
self.SDWORD = ctypes.c_long
self.UDWORD = ctypes.c_ulong
self.SSHORT = ctypes.c_short
self.USHORT = ctypes.c_ushort
self.SLONG = ctypes.c_long
self.ULONG = ctypes.c_ulong
self.SFLOAT = ctypes.c_float
self.SDOUBLE = ctypes.c_double
self.LDOUBLE = ctypes.c_double
self.ODBCINT64 = ctypes.c_longlong
self.ODBCUINT64 = ctypes.c_ulonglong
################################################################################
####Return Types################################################################
################################################################################
self.RETCODE = ctypes.c_short
self.SQLRETURN = self.RETCODE
################################################################################
        ####Portable Types: DATE, TIME, TIMESTAMP, BOOKMARK#############################
################################################################################
self.BOOKMARK = self.SQLULEN
class DATE_STUCT_DEFINITION(ctypes.Structure):
_fields_ = [("year", self.SQLSMALLINT),
("month", self.SQLUSMALLINT),
("day", self.SQLUSMALLINT)]
self.DATE_STRUCT = DATE_STUCT_DEFINITION
self.SQL_DATE_STRUCT = self.DATE_STRUCT
class TIME_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("hour", self.SQLUSMALLINT),
("minute", self.SQLUSMALLINT),
("second", self.SQLUSMALLINT)]
self.TIME_STRUCT = TIME_STRUCT_DEFINITION
self.SQL_TIME_STRUCT = self.TIME_STRUCT
class TIMESTAMP_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("year", self.SQLSMALLINT),
("month", self.SQLUSMALLINT),
("day", self.SQLUSMALLINT),
("hour", self.SQLUSMALLINT),
("minute", self.SQLUSMALLINT),
("second", self.SQLUSMALLINT),
("fraction", self.SQLUINTEGER)]
self.TIMESTAMP_STRUCT = TIMESTAMP_STRUCT_DEFINITION
self.SQL_TIMESTAMP_STRUCT = self.TIMESTAMP_STRUCT
################################################################################
####enumeration for DATETIME_INTERVAL_SUBCODE###################################
################################################################################
self.SQLINTERVAL = ctypes.c_int
self.SQL_IS_YEAR = 1
self.SQL_IS_MONTH = 2
self.SQL_IS_DAY = 3
self.SQL_IS_HOUR = 4
self.SQL_IS_MINUTE = 5
self.SQL_IS_SECOND = 6
self.SQL_IS_YEAR_TO_MONTH = 7
self.SQL_IS_DAY_TO_HOUR = 8
self.SQL_IS_DAY_TO_MINUTE = 9
self.SQL_IS_DAY_TO_SECOND = 10
self.SQL_IS_HOUR_TO_MINUTE = 11
self.SQL_IS_HOUR_TO_SECOND = 12
self.SQL_IS_MINUTE_TO_SECOND = 13
class SQL_YEAR_MONTH_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("year", self.SQLUINTEGER),
("month", self.SQLUINTEGER)]
self.SQL_YEAR_MONTH_STRUCT = SQL_YEAR_MONTH_STRUCT_DEFINITION
class SQL_DAY_SECOND_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("day", self.SQLUINTEGER),
("hour", self.SQLUINTEGER),
("minute", self.SQLUINTEGER),
("second", self.SQLUINTEGER),
("fraction", self.SQLUINTEGER)]
self.SQL_DAY_SECOND_STRUCT = SQL_DAY_SECOND_STRUCT_DEFINITION
class SQL_INTERVAL_UNION_DEFINITION(ctypes.Union):
_fields_ = [("year_month", self.SQL_YEAR_MONTH_STRUCT),
("day_second", self.SQL_DAY_SECOND_STRUCT)]
self.SQL_INTERVAL_UNION = SQL_INTERVAL_UNION_DEFINITION
class SQL_INTERVAL_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("interval_type", self.SQLINTERVAL),
("interval_sign", self.SQLSMALLINT),
("intval", self.SQL_INTERVAL_UNION)]
self.SQL_INTERVAL_STRUCT = SQL_INTERVAL_STRUCT_DEFINITION
################################################################################
####Numeric Data Type###########################################################
################################################################################
self.SQL_MAX_NUMERIC_LEN = 16
class SQL_NUMERIC_STRUCT_DEFINITION(ctypes.Structure):
_fields_ = [("precision", self.SQLCHAR),
("scale", self.SQLSCHAR),
("sign", self.SQLCHAR),
("val", self.SQLCHAR * self.SQL_MAX_NUMERIC_LEN)]
self.SQL_NUMERIC_STRUCT = SQL_NUMERIC_STRUCT_DEFINITION
################################################################################
################################################################################
################################################################################
class SQLGUID_DEFINITION(ctypes.Structure):
_fields_ = [("Data1", ctypes.c_uint),
("Data2", ctypes.c_ushort),
("Data3", ctypes.c_ushort),
("Data4", ctypes.c_char * 8)]
self.SQLGUID = SQLGUID_DEFINITION
####----------------------------------------------------------------------------
####This section mimics sql.h---------------------------------------------------
####----------------------------------------------------------------------------
################################################################################
        ####Useful Constants############################################################
################################################################################
self.SQL_MAX_MESSAGE_LENGTH = 512
################################################################################
####Handle Types################################################################
################################################################################
self.SQL_HANDLE_ENV = 1
self.SQL_HANDLE_DBC = 2
self.SQL_HANDLE_STMT = 3
self.SQL_HANDLE_DESC = 4
################################################################################
####Function return codes#######################################################
################################################################################
self.SQL_SUCCESS = 0
self.SQL_SUCCESS_WITH_INFO = 1
self.SQL_STILL_EXECUTING = 2
self.SQL_ERROR = -1
self.SQL_INVALID_HANDLE = -2
self.SQL_NEED_DATA = 99
self.SQL_NO_DATA = 100
################################################################################
####Test for success############################################################
################################################################################
def SQL_SUCCEEDED_DEFINITION(return_code):
    """Return True when an ODBC return code indicates success.

    The only success codes are SQL_SUCCESS (0) and SQL_SUCCESS_WITH_INFO (1);
    forcing the low bit on and comparing to 1 accepts exactly those two.
    """
    return (return_code | 1) == 1
self.SQL_SUCCEEDED = SQL_SUCCEEDED_DEFINITION
################################################################################
####Special length values#######################################################
################################################################################
self.SQL_NULL_DATA = -1
self.SQL_DATA_AT_EXEC = -2
################################################################################
####Flags for null-terminated strings###########################################
################################################################################
self.SQL_NTS = -3
self.SQL_NTSL = -3
################################################################################
####Standard SQL datatypes######################################################
################################################################################
self.SQL_UNKNOWN_TYPE = 0
self.SQL_CHAR = 1
self.SQL_NUMERIC = 2
self.SQL_DECIMAL = 3
self.SQL_INTEGER = 4
self.SQL_SMALLINT = 5
self.SQL_FLOAT = 6
self.SQL_REAL = 7
self.SQL_DOUBLE = 8
self.SQL_DATETIME = 9
self.SQL_VARCHAR = 12
################################################################################
####SQLGetTypeInfo request for all data types###################################
################################################################################
self.SQL_ALL_TYPES = 0
################################################################################
####Statement attribute values for date/time data types#########################
################################################################################
self.SQL_TYPE_DATE = 91
self.SQL_TYPE_TIME = 92
self.SQL_TYPE_TIMESTAMP = 93
################################################################################
####Date/Time constants#########################################################
################################################################################
self.SQL_DATE_LEN = 10
self.SQL_TIME_LEN = 8
self.SQL_TIMESTAMP_LEN = 19
################################################################################
####Null status constants#######################################################
################################################################################
self.SQL_NO_NULLS = 0
self.SQL_NULLABLE = 1
self.SQL_NULLABLE_UNKNOWN = 2
################################################################################
####NULL Handles################################################################
################################################################################
self.SQL_NULL_HENV = self.SQLHANDLE()
self.SQL_NULL_HDBC = self.SQLHANDLE()
self.SQL_NULL_HSTMT = self.SQLHANDLE()
self.SQL_NULL_HDESC = self.SQLHANDLE()
self.SQL_NULL_SQLLEN = ctypes.POINTER(self.SQLLEN)()
self.SQL_NULL_SQLULEN = ctypes.POINTER(self.SQLULEN)()
self.SQL_NULL_SQLSMALLINT = ctypes.POINTER(self.SQLSMALLINT)()
self.SQL_NULL_SQLUSMALLINT = ctypes.POINTER(self.SQLUSMALLINT)()
self.SQL_NULL_SQLINTEGER = ctypes.POINTER(self.SQLINTEGER)()
self.SQL_NULL_SQLUINTEGER = ctypes.POINTER(self.SQLUINTEGER)()
self.SQL_NULL_SQLCHAR = ctypes.POINTER(self.SQLCHAR)()
self.SQL_NULL_SQLWCHAR = ctypes.POINTER(self.SQLWCHAR)()
self.SQL_NULL_SQLTCHAR = ctypes.POINTER(self.SQLTCHAR)()
################################################################################
####Null parent for self.SQLHENV#####################################################
################################################################################
self.SQL_NULL_HANDLE = self.SQLHANDLE(0)
################################################################################
####CLI option values###########################################################
################################################################################
self.SQL_FALSE = 0
self.SQL_TRUE = 1
################################################################################
####Default conversion code#####################################################
################################################################################
self.SQL_DEFAULT = 99
################################################################################
####SQLDataSources/SQLFetchScroll - FetchOrientation############################
################################################################################
self.SQL_FETCH_NEXT = 1
self.SQL_FETCH_FIRST = 2
################################################################################
####SQLFetchScroll - FetchOrientation###########################################
################################################################################
self.SQL_FETCH_LAST = 3
self.SQL_FETCH_PRIOR = 4
self.SQL_FETCH_ABSOLUTE = 5
self.SQL_FETCH_RELATIVE = 6
################################################################################
####SQLFreeStmt#################################################################
################################################################################
self.SQL_CLOSE = 0
self.SQL_DROP = 1
self.SQL_UNBIND = 2
self.SQL_RESET_PARAMS = 3
################################################################################
####SQLGetConnectAttr - connection attributes###################################
################################################################################
self.SQL_ATTR_AUTO_IPD = 10001
self.SQL_ATTR_METADATA_ID = 10014
################################################################################
####SQLGetData code indicating that the application row descriptor##############
####specifies the data type#####################################################
################################################################################
self.SQL_ARD_TYPE = -99
################################################################################
####SQLGetDescField - identifiers of fields in the SQL descriptor###############
################################################################################
self.SQL_DESC_COUNT = 1001
self.SQL_DESC_TYPE = 1002
self.SQL_DESC_LENGTH = 1003
self.SQL_DESC_OCTET_LENGTH_PTR = 1004
self.SQL_DESC_PRECISION = 1005
self.SQL_DESC_SCALE = 1006
self.SQL_DESC_DATETIME_INTERVAL_CODE = 1007
self.SQL_DESC_NULLABLE = 1008
self.SQL_DESC_INDICATOR_PTR = 1009
self.SQL_DESC_DATA_PTR = 1010
self.SQL_DESC_NAME = 1011
self.SQL_DESC_UNNAMED = 1012
self.SQL_DESC_OCTET_LENGTH = 1013
self.SQL_DESC_ALLOC_TYPE = 1099
################################################################################
####SQLGetDescField - SQL_DESC_ALLOC_TYPE#######################################
################################################################################
self.SQL_DESC_ALLOC_AUTO = 1
self.SQL_DESC_ALLOC_USER = 2
################################################################################
####SQLGetDescField - SQL_DESC_DATETIME_INTERVAL_CODE###########################
################################################################################
self.SQL_CODE_DATE = 1
self.SQL_CODE_TIME = 2
self.SQL_CODE_TIMESTAMP = 3
################################################################################
####SQLGetDescField - SQL_DESC_UNNAMED##########################################
################################################################################
self.SQL_NAMED = 0
self.SQL_UNNAMED = 1
################################################################################
####SQLGetDiagField - identifiers of fields in the diagnostics area#############
################################################################################
self.SQL_DIAG_RETURNCODE = 1
self.SQL_DIAG_NUMBER = 2
self.SQL_DIAG_ROW_COUNT = 3
self.SQL_DIAG_SQLSTATE = 4
self.SQL_DIAG_NATIVE = 5
self.SQL_DIAG_MESSAGE_TEXT = 6
self.SQL_DIAG_DYNAMIC_FUNCTION = 7
self.SQL_DIAG_CLASS_ORIGIN = 8
self.SQL_DIAG_SUBCLASS_ORIGIN = 9
self.SQL_DIAG_CONNECTION_NAME = 10
self.SQL_DIAG_SERVER_NAME = 11
self.SQL_DIAG_DYNAMIC_FUNCTION_CODE = 12
################################################################################
####SQLGetDiagField - SQL_DIAG_DYNAMIC_FUNCTION_CODE############################
################################################################################
self.SQL_DIAG_ALTER_DOMAIN = 3
self.SQL_DIAG_ALTER_TABLE = 4
self.SQL_DIAG_CALL = 7
self.SQL_DIAG_CREATE_ASSERTION = 6
self.SQL_DIAG_CREATE_CHARACTER_SET = 8
self.SQL_DIAG_CREATE_COLLATION = 10
self.SQL_DIAG_CREATE_DOMAIN = 23
self.SQL_DIAG_CREATE_INDEX = -1
self.SQL_DIAG_CREATE_SCHEMA = 64
self.SQL_DIAG_CREATE_TABLE = 77
self.SQL_DIAG_CREATE_TRANSLATION = 79
self.SQL_DIAG_CREATE_VIEW = 84
self.SQL_DIAG_DELETE_WHERE = 19
self.SQL_DIAG_DROP_ASSERTION = 24
self.SQL_DIAG_DROP_CHARACTER_SET = 25
self.SQL_DIAG_DROP_COLLATION = 26
self.SQL_DIAG_DROP_DOMAIN = 27
self.SQL_DIAG_DROP_INDEX = -2
self.SQL_DIAG_DROP_SCHEMA = 31
self.SQL_DIAG_DROP_TABLE = 32
self.SQL_DIAG_DROP_TRANSLATION = 33
self.SQL_DIAG_DROP_VIEW = 36
self.SQL_DIAG_DYNAMIC_DELETE_CURSOR = 38
self.SQL_DIAG_DYNAMIC_UPDATE_CURSOR = 81
self.SQL_DIAG_GRANT = 48
self.SQL_DIAG_INSERT = 50
self.SQL_DIAG_REVOKE = 59
self.SQL_DIAG_SELECT_CURSOR = 85
self.SQL_DIAG_UNKNOWN_STATEMENT = 0
self.SQL_DIAG_UPDATE_WHERE = 82
################################################################################
####SQLGetEnvAttr - environment attribute#######################################
################################################################################
self.SQL_ATTR_OUTPUT_NTS = 10001
################################################################################
####SQLGetFunctions#############################################################
################################################################################
self.SQL_API_SQLALLOCCONNECT = 1
self.SQL_API_SQLALLOCENV = 2
self.SQL_API_SQLALLOCHANDLE = 1001
self.SQL_API_SQLALLOCSTMT = 3
self.SQL_API_SQLBINDCOL = 4
self.SQL_API_SQLBINDPARAM = 1002
self.SQL_API_SQLCANCEL = 5
self.SQL_API_SQLCLOSECURSOR = 1003
self.SQL_API_SQLCOLATTRIBUTE = 6
self.SQL_API_SQLCOLUMNS = 40
self.SQL_API_SQLCONNECT = 7
self.SQL_API_SQLCOPYDESC = 1004
self.SQL_API_SQLDATASOURCES = 57
self.SQL_API_SQLDESCRIBECOL = 8
self.SQL_API_SQLDISCONNECT = 9
self.SQL_API_SQLENDTRAN = 1005
self.SQL_API_SQLERROR = 10
self.SQL_API_SQLEXECDIRECT = 11
self.SQL_API_SQLEXECUTE = 12
self.SQL_API_SQLFETCH = 13
self.SQL_API_SQLFETCHSCROLL = 1021
self.SQL_API_SQLFREECONNECT = 14
self.SQL_API_SQLFREEENV = 15
self.SQL_API_SQLFREEHANDLE = 1006
self.SQL_API_SQLFREESTMT = 16
self.SQL_API_SQLGETCONNECTATTR = 1007
self.SQL_API_SQLGETCONNECTOPTION = 42
self.SQL_API_SQLGETCURSORNAME = 17
self.SQL_API_SQLGETDATA = 43
self.SQL_API_SQLGETDESCFIELD = 1008
self.SQL_API_SQLGETDESCREC = 1009
self.SQL_API_SQLGETDIAGFIELD = 1010
self.SQL_API_SQLGETDIAGREC = 1011
self.SQL_API_SQLGETENVATTR = 1012
self.SQL_API_SQLGETFUNCTIONS = 44
self.SQL_API_SQLGETINFO = 45
self.SQL_API_SQLGETSTMTATTR = 1014
self.SQL_API_SQLGETSTMTOPTION = 46
self.SQL_API_SQLGETTYPEINFO = 47
self.SQL_API_SQLNUMRESULTCOLS = 18
self.SQL_API_SQLPARAMDATA = 48
self.SQL_API_SQLPREPARE = 19
self.SQL_API_SQLPUTDATA = 49
self.SQL_API_SQLROWCOUNT = 20
self.SQL_API_SQLSETCONNECTATTR = 1016
self.SQL_API_SQLSETCONNECTOPTION = 50
self.SQL_API_SQLSETCURSORNAME = 21
self.SQL_API_SQLSETDESCFIELD = 1017
self.SQL_API_SQLSETDESCREC = 1018
self.SQL_API_SQLSETENVATTR = 1019
self.SQL_API_SQLSETPARAM = 22
self.SQL_API_SQLSETSTMTATTR = 1020
self.SQL_API_SQLSETSTMTOPTION = 51
self.SQL_API_SQLSPECIALCOLUMNS = 52
| |
#!/usr/bin/env python
"""
Takes a cifti map ('dscalar.nii') and outputs a csv of results
Usage:
ciftify_statclust_report [options] <func.dscalar.nii>
Arguments:
<func.dscalar.nii> Input map.
Options:
--min-threshold MIN the largest value [default: -2.85] to consider for being a minimum
--max-threshold MAX the smallest value [default: 2.85] to consider for being a maximum
--area-threshold MIN threshold [default: 20] for surface cluster area, in mm^2
--surface-distance MM minimum distance in mm [default: 20] between extrema of the same type.
--volume-distance MM minimum distance in mm [default: 20] between extrema of the same type.
--outputbase prefix Output prefix (with path) to output documents
--no-cluster-dlabel Do not output a dlabel map of the clusters
--output-peaks Also output an additional output of peak locations
--left-surface GII Left surface file (default is HCP S1200 Group Average)
--right-surface GII Right surface file (default is HCP S1200 Group Average)
--left-surf-area GII Left surface vertex areas file (default is HCP S1200 Group Average)
--right-surf-area GII Right surface vertex areas file (default is HCP S1200 Group Average)
--debug Debug logging
-n,--dry-run Dry run
-h, --help Prints this message
DETAILS
Note: at the moment generates separate outputs for surface.
Uses -cifti-separate in combination with FSL's clusterize to get information from
the subcortical space.
Outputs a cluster report csv with the following headings:
+ clusterID: Integer for the cluster this peak is from (corresponds to dlabel.nii)
+ cluster_name: the cluster label
+ by default this will be "LABEL_<clusterID>" but this be changed
in the .dlabel.nii file using connectome-workbench
+ mean_value: the average value for this cluster within the input dscalar.nii map
+ area: the surface area of the cluster (on the specified surface)
+ DKT_overlap: a list of DKT freesurfer anatomical atlas (aparc) atlas labels
that overlap with this cluster and the percent overlap of each label
+ Yeo7_overlap: a list of the Yeo et al 2011 7 network labels that overlap
with this cluster and the percent overlap of each label
+ MMP_overlap: The labels from the Glasser et al (2016) Multi-Modal Parcellation
that overlap with this cluster and the percent overlap of each label
If the "--output-peaks" flag is given, an additional table will be output
with several headings:
+ clusterID: Integer for the cluster this peak is from (corresponds to dlabel.nii)
+ hemisphere: Hemisphere the peak is in (L or R)
+ vertex: The vertex id
+ x,y,z: The nearest x,y,z coordinates to the vertex
+ value: The intensity (value) at that vertex in the func.dscalar.nii
+ DKT: The label from the freesurfer anatomical atlas (aparc) at the vertex
+ DKT_overlap: The proportion of the cluster (clusterID) that overlaps with the DKT atlas label
+ Yeo7: The label from the Yeo et al 2011 7 network atlas at this peak vertex
+ Yeo7_overlap: The proportion of the cluster (clusterID) that overlaps with this Yeo7 network label
+ MMP: The label from the Glasser et al (2016) Multi-Modal Parcellation
+ MMP_overlap: The proportion of the cluster (clusterID) that overlaps with the MMP atlas label
If no surfaces or surface area files are given, the midthickness surfaces from
the HCP S1200 Group Mean will be used, as well as its vertex-wise
surface area information.
Default name for the output csv taken from the input file.
i.e. func.dscalar.nii --> func_peaks.csv
Unless the '--no-cluster-dlabel' flag is given, a map of the clusters will
be written to the same folder as the output csv to aid in visualization of the results.
This dlabel map will have a name ending in '_clust.dlabel.nii'.
(i.e. func_peaks.csv & func_clust.dlabel.nii)
Atlas References:
Yeo, BT. et al. 2011. 'The Organization of the Human Cerebral Cortex
Estimated by Intrinsic Functional Connectivity.' Journal of Neurophysiology
106 (3): 1125-65.
Desikan, RS.et al. 2006. 'An Automated Labeling System for Subdividing the
Human Cerebral Cortex on MRI Scans into Gyral Based Regions of Interest.'
NeuroImage 31 (3): 968-80.
Glasser, MF. et al. 2016. 'A Multi-Modal Parcellation of Human Cerebral Cortex.'
Nature 536 (7615): 171-78.
Written by <NAME>, Last updated August 27, 2017
"""
import logging
import logging.config
import os
import sys

import numpy as np
import pandas as pd
from docopt import docopt

import ciftify.config
import ciftify.io
import ciftify.report
import ciftify.utils
from ciftify.meants import NibInput
config_path = os.path.join(os.path.dirname(ciftify.config.find_ciftify_global()), 'bin', "logging.conf")
logging.config.fileConfig(config_path, disable_existing_loggers=False)
logger = logging.getLogger(os.path.basename(__file__))
def load_LR_vertex_areas(surf_settings):
    '''Load the left and right hemisphere vertex areas and stack them
    (left on top of right) into a single array.'''
    per_hemi = [ciftify.io.load_gii_data(hemi.vertex_areas)
                for hemi in (surf_settings.L, surf_settings.R)]
    return np.vstack(per_hemi)
def report_atlas_overlap(df, label_data, atlas, surf_va_LR, min_percent_overlap = 5):
    '''Add a "<atlas name>_overlap" column to the cluster report dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        the cluster report dataframe (one row per cluster label)
    label_data : array
        the cluster label values per vertex
    atlas : dict
        one atlas settings dict (needs 'path', 'map_number' and 'name' keys)
    surf_va_LR : array
        stacked left/right vertex areas (see load_LR_vertex_areas)
    min_percent_overlap : numeric
        labels overlapping less than this percent are omitted from the summary

    Returns
    -------
    df with the overlap summary column filled in.
    '''
    # read the atlas
    atlas_data, atlas_dict = ciftify.io.load_LR_label(atlas['path'],
                                                      int(atlas['map_number']))
    # write an overlap report column into the dataframe
    o_col = '{}_overlap'.format(atlas['name'])
    df[o_col] = ""
    # NOTE: Index.get_values() was deprecated in pandas 0.25 and removed in
    # 1.0; iterating the index directly is the supported equivalent.
    for pd_idx in df.index:
        df.loc[pd_idx, o_col] = ciftify.report.get_label_overlap_summary(
            pd_idx, label_data, atlas_data, atlas_dict, surf_va_LR,
            min_percent_overlap = min_percent_overlap)
    return(df)
def run_ciftify_dlabel_report(arguments, tmpdir):
    '''Main workflow: cluster the input dscalar map and write the report csv.

    Clusterizes the input, labels the clusters, computes per-cluster surface
    area and atlas overlaps, writes <outputbase>_statclust_report.csv, and
    optionally writes the cluster dlabel map and the peaks table.

    Parameters
    ----------
    arguments : dict
        the docopt user-arguments dictionary
    tmpdir : filepath
        temporary directory for intermediate cifti files
    '''
    dscalar_in = NibInput(arguments['<func.dscalar.nii>'])
    outputbase = arguments['--outputbase']
    dont_output_clusters = arguments['--no-cluster-dlabel']
    output_peaktable = arguments['--output-peaks']
    surf_settings = ciftify.report.CombinedSurfaceSettings(arguments, tmpdir)
    atlas_settings = ciftify.report.define_atlas_settings()

    ## if no outputbase is given, create it from the input dscalar map
    if not outputbase:
        outputbase = os.path.join(os.path.dirname(dscalar_in.path), dscalar_in.base)
    ciftify.utils.check_output_writable(outputbase, exit_on_error = True)

    clusters_dscalar = clusterise_dscalar_input(dscalar_in.path,
                                                arguments,
                                                surf_settings,
                                                tmpdir)

    ## the dlabel map only persists when the user did not opt out
    if dont_output_clusters:
        cluster_dlabel = os.path.join(tmpdir, 'clust.dlabel.nii')
    else:
        cluster_dlabel = '{}_clust.dlabel.nii'.format(outputbase)
    ## import with an empty label list so wb_command assigns default names
    empty_labels = os.path.join(tmpdir, 'empty_labels.txt')
    ciftify.utils.run('touch {}'.format(empty_labels))
    ciftify.utils.run(['wb_command', '-cifti-label-import',
                       clusters_dscalar, empty_labels, cluster_dlabel])

    ## load the labeled cluster data
    label_data, label_dict = ciftify.io.load_LR_label(cluster_dlabel, map_number = 1)

    ## define the output csv
    outputcsv = '{}_statclust_report.csv'.format(outputbase)
    logger.info('Output table: {}'.format(outputcsv))

    ## load the vertex areas
    surf_va_LR = load_LR_vertex_areas(surf_settings)

    ## assert that the dimensions match
    if not (label_data.shape[0] == surf_va_LR.shape[0]):
        logger.error('label file vertices {} not equal to vertex areas {}'
                     ''.format(label_data.shape[0], surf_va_LR.shape[0]))
        sys.exit(1)

    ## use the label dict to start the report dataframe
    df = pd.DataFrame.from_dict(label_dict, orient = "index")
    df['label_idx'] = df.index
    df = df.rename(index=str, columns={0: "label_name"})

    ## calculate a column of the surface area for each cluster ROI
    ## (Index.get_values() was removed in pandas 1.0; iterate the index)
    df['area'] = -999
    for pd_idx in df.index:
        df.loc[pd_idx, 'area'] = ciftify.report.calc_cluster_area(pd_idx,
                                                label_data, surf_va_LR)

    for atlas in atlas_settings.values():
        df = report_atlas_overlap(df, label_data, atlas,
                                  surf_va_LR, min_percent_overlap = 5)

    df.to_csv(outputcsv)

    if output_peaktable:
        write_statclust_peaktable(dscalar_in.path, clusters_dscalar, outputbase,
                                  arguments, surf_settings, atlas_settings)
class ThresholdArgs(object):
    '''Little class that holds the user arguments about thresholds.

    The previous implementation raised a TypeError (it called the arguments
    dict), read a garbled key ('--area-thratlas_settingseshold'), and dropped
    most values into unused locals. All thresholds are now stored as
    attributes; the original self.max and self.volume_distance names are kept.
    '''
    def __init__(self, arguments):
        # value thresholds for positive / negative clusters
        self.max = arguments['--max-threshold']
        self.min = arguments['--min-threshold']
        # minimum surface cluster area, in mm^2
        self.area_threshold = arguments['--area-threshold']
        # minimum distances between extrema of the same type
        self.surface_distance = arguments['--surface-distance']
        self.volume_distance = arguments['--volume-distance']
def clusterise_dscalar_input(data_file, arguments, surf_settings, tmpdir):
    '''runs wb_command -cifti-find-clusters twice
    returns the path to the output

    Positive clusters (above --max-threshold) are labeled first starting at
    1; negative clusters (below --min-threshold) are labeled starting after
    the largest positive label, so the combined map has unique integer ids.
    '''
    ## also run clusterize with the same settings to get clusters
    pcluster_dscalar = os.path.join(tmpdir,'pclusters.dscalar.nii')
    wb_cifti_clusters(data_file, pcluster_dscalar, surf_settings,
                      arguments['--max-threshold'],
                      arguments['--area-threshold'],
                      less_than = False, starting_label=1)

    ## load both cluster files to determine the max value
    pos_clust_data = ciftify.io.load_concat_cifti_surfaces(pcluster_dscalar)
    max_pos = int(np.max(pos_clust_data))

    ## now get the negative clusters, numbered after the positive ones
    ncluster_dscalar = os.path.join(tmpdir,'nclusters.dscalar.nii')
    wb_cifti_clusters(data_file, ncluster_dscalar, surf_settings,
                      arguments['--min-threshold'],
                      arguments['--area-threshold'],
                      less_than = True, starting_label=max_pos + 1)

    ## add the positive and negative together to make one cluster map
    ## NOTE(review): '-cifti-math "(x+y)"' is passed as a single list element;
    ## this relies on ciftify.utils.run joining the list into a shell string
    ## rather than exec'ing it as an argv list - confirm.
    clusters_out = os.path.join(tmpdir,'clusters.dscalar.nii')
    ciftify.utils.run(['wb_command', '-cifti-math "(x+y)"',
                       clusters_out,
                       '-var','x',pcluster_dscalar, '-var','y',ncluster_dscalar])
    return clusters_out
def wb_cifti_clusters(input_cifti, output_cifti, surf_settings,
                      value_threshold, minimun_size, less_than, starting_label=1):
    '''Run wb_command -cifti-find-clusters on a cifti map.

    The same value threshold and minimum size are applied to both the
    surface and volume parts. '-less-than' flips the threshold direction
    (for negative clusters), and '-merged-volume' is added only when the
    input actually maps to volume data.
    '''
    command = ['wb_command', '-cifti-find-clusters',
               input_cifti,
               str(value_threshold), str(minimun_size),
               str(value_threshold), str(minimun_size),
               'COLUMN',
               output_cifti,
               '-left-surface', surf_settings.L.surface,
               '-corrected-areas', surf_settings.L.vertex_areas,
               '-right-surface', surf_settings.R.surface,
               '-corrected-areas', surf_settings.R.vertex_areas,
               '-start', str(starting_label)]
    if less_than:
        command.append('-less-than')
    if ciftify.io.cifti_info(input_cifti)['maps_to_volume']:
        command.append('-merged-volume')
    ciftify.utils.run(command)
def write_statclust_peaktable(data_file, clusters_dscalar, outputbase,
arguments, surf_settings, atlas_settings):
'''runs the old peak table functionality
Parameters
----------
data_file : filepath
path to the dscalar map input
clusters_dscalar : filepath
path to the cluster file created with same settings
outputbase :
the prefix for the outputfile
arguments : dict
the user args dictionary to pull the thresholds from
surf_settings : dict
the dictionary of paths to the surface files,
created by ciftify.report.CombinedSurfaceSettings
altas_settings : dict
dictionary of paths and settings related to the atlases to use for overlaps
comparison. Created by ciftify.report.define_atlas_settings()
Outputs
-------
writes a csv to <outputbase>_cortex_peaks.csv
'''
with ciftify.utils.TempDir() as ex_tmpdir:
## run FSL's cluster on the subcortical bits
## now to run FSL's cluster on the subcortical bits
cinfo = ciftify.io.cifti_info(data_file)
if cinfo['maps_to_volume']:
subcortical_vol = os.path.join(ex_tmpdir, 'subcortical.nii.gz')
ciftify.utils.run(['wb_command', '-cifti-separate', data_file, 'COLUMN', '-volume-all', subcortical_vol])
fslcluster_cmd = ['cluster',
'--in={}'.format(subcortical_vol),
'--thresh={}'.format(arguments['--max-threshold']),
'--peakdist={}'.format(arguments['--volume-distance'])]
peak_table = ciftify.utils.get_stdout(fslcluster_cmd)
with open("{}_subcortical_peaks.csv".format(outputbase), "w") as text_file:
text_file.write(peak_table.replace('/t',','))
else:
logger.info('No subcortical volume data in {}'.format(data_file))
## run wb_command -cifti-extrema to find the peak locations
extrema_dscalar = os.path.join(ex_tmpdir,'extrema.dscalar.nii')
ciftify.utils.run(['wb_command','-cifti-extrema',
data_file,
str(arguments['--surface-distance']),
str(arguments['--volume-distance']),
'COLUMN',
extrema_dscalar,
'-left-surface', surf_settings.L.surface,
'-right-surface', surf_settings.R.surface,
'-threshold',
str(arguments['--min-threshold']),
str(arguments['--max-threshold'])])
## multiply the cluster labels by the extrema to get the labeled exteama
lab_extrema_dscalar = os.path.join(ex_tmpdir,'lab_extrema.dscalar.nii')
| |
<gh_stars>0
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AddClusterInstancesRequest(AbstractModel):
    """AddClusterInstances request structure.

    Attributes (all None until populated by _deserialize):
        ClusterId (str): cluster ID
        InstanceIdList (list of str): CVM instance IDs
        OsName (str): operating system name
        ImageId (str): OS image ID
        Password (str): instance password
        KeyId (str): key ID associated with system reinstallation
        SgId (str): security group ID
        InstanceImportMode (str): instance import mode; required for VM
            clusters, omitted for container clusters. R: reinstall the TSF
            system image, M: install the agent manually
        OsCustomizeType (str): image customization type
    """

    def __init__(self):
        for field in ("ClusterId", "InstanceIdList", "OsName", "ImageId",
                      "Password", "KeyId", "SgId", "InstanceImportMode",
                      "OsCustomizeType"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Every field is copied verbatim from the request payload.
        for field in ("ClusterId", "InstanceIdList", "OsName", "ImageId",
                      "Password", "KeyId", "SgId", "InstanceImportMode",
                      "OsCustomizeType"):
            setattr(self, field, params.get(field))
class AddClusterInstancesResponse(AbstractModel):
    """AddClusterInstances response structure.

    Attributes:
        Result (AddInstanceResult): outcome lists for the added instances;
            may be None
        RequestId (str): unique request ID, returned with every response;
            needed when reporting an issue
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_result = params.get("Result")
        if raw_result is not None:
            self.Result = AddInstanceResult()
            self.Result._deserialize(raw_result)
        self.RequestId = params.get("RequestId")
class AddInstanceResult(AbstractModel):
    """Result of adding instances to a cluster.

    Attributes (each a list of instance IDs; may be None):
        FailedInstanceIds: instances that failed to join the cluster
        SuccInstanceIds: instances that were added successfully
        TimeoutInstanceIds: instances that timed out while joining
    """

    def __init__(self):
        for field in ("FailedInstanceIds", "SuccInstanceIds",
                      "TimeoutInstanceIds"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("FailedInstanceIds", "SuccInstanceIds",
                      "TimeoutInstanceIds"):
            setattr(self, field, params.get(field))
class AddInstancesRequest(AbstractModel):
    """AddInstances request structure.

    Attributes (all None until populated by _deserialize):
        ClusterId (str): cluster ID
        InstanceIdList (list of str): CVM instance IDs
        OsName (str): operating system name
        ImageId (str): OS image ID
        Password (str): instance password
        KeyId (str): key ID associated with system reinstallation
        SgId (str): security group ID
        InstanceImportMode (str): instance import mode; required for VM
            clusters, omitted for container clusters. R: reinstall the TSF
            system image, M: install the agent manually
    """

    def __init__(self):
        for field in ("ClusterId", "InstanceIdList", "OsName", "ImageId",
                      "Password", "KeyId", "SgId", "InstanceImportMode"):
            setattr(self, field, None)

    def _deserialize(self, params):
        # Every field is copied verbatim from the request payload.
        for field in ("ClusterId", "InstanceIdList", "OsName", "ImageId",
                      "Password", "KeyId", "SgId", "InstanceImportMode"):
            setattr(self, field, params.get(field))
class AddInstancesResponse(AbstractModel):
    """AddInstances response structure.

    Attributes:
        Result (bool): whether the instances were added; may be None
        RequestId (str): unique request ID, returned with every response;
            needed when reporting an issue
    """

    def __init__(self):
        self.Result = None
        self.RequestId = None

    def _deserialize(self, params):
        for field in ("Result", "RequestId"):
            setattr(self, field, params.get(field))
class ApiDefinitionDescr(AbstractModel):
    """Description of an API object type.

    Attributes:
        Name (str): object name
        Properties (list of PropertyField): object property list; may be None
    """

    def __init__(self):
        self.Name = None
        self.Properties = None

    def _deserialize(self, params):
        self.Name = params.get("Name")
        raw_props = params.get("Properties")
        if raw_props is not None:
            # Deserialize each raw dict into a PropertyField model.
            self.Properties = []
            for raw in raw_props:
                prop = PropertyField()
                prop._deserialize(raw)
                self.Properties.append(prop)
class ApiDetailResponse(AbstractModel):
    """Detailed description of an API.

    Attributes:
        Request (list of ApiRequestDescr): API request parameters
        Response (list of ApiResponseDescr): API response parameters;
            may be None
        Definitions (list of ApiDefinitionDescr): complex type definitions
            used by the API
        RequestContentType (str): the API's content type; may be None
        CanRun (bool): whether the API can be invoked for debugging;
            may be None
        Status (int): API status, 0 = offline, 1 = online (default 0);
            may be None
    """

    def __init__(self):
        for field in ("Request", "Response", "Definitions",
                      "RequestContentType", "CanRun", "Status"):
            setattr(self, field, None)

    def _deserialize(self, params):
        def build_list(items, descr_cls):
            # Deserialize each raw dict into its model object.
            out = []
            for raw in items:
                descr = descr_cls()
                descr._deserialize(raw)
                out.append(descr)
            return out

        if params.get("Request") is not None:
            self.Request = build_list(params.get("Request"), ApiRequestDescr)
        if params.get("Response") is not None:
            self.Response = build_list(params.get("Response"), ApiResponseDescr)
        if params.get("Definitions") is not None:
            self.Definitions = build_list(params.get("Definitions"),
                                          ApiDefinitionDescr)
        self.RequestContentType = params.get("RequestContentType")
        self.CanRun = params.get("CanRun")
        self.Status = params.get("Status")
class ApiRequestDescr(AbstractModel):
    """Description of one API request parameter.

    Attributes:
        Name (str): parameter name
        Type (str): parameter type
        In (str): parameter location
        Description (str): parameter description
        Required (bool): whether the parameter is required
        DefaultValue (str): the parameter's default value; may be None
    """

    def __init__(self):
        for field in ("Name", "Type", "In", "Description", "Required",
                      "DefaultValue"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Name", "Type", "In", "Description", "Required",
                      "DefaultValue"):
            setattr(self, field, params.get(field))
class ApiResponseDescr(AbstractModel):
    """Description of one API response parameter.

    Attributes:
        Name (str): parameter name
        Type (str): parameter type
        Description (str): parameter description
    """

    def __init__(self):
        for field in ("Name", "Type", "Description"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("Name", "Type", "Description"):
            setattr(self, field, params.get(field))
class ApiVersionArray(AbstractModel):
    """One entry of an API version array.

    Attributes (each may be None):
        ApplicationId (str): application ID
        ApplicationName (str): application name
        PkgVersion (str): application package version
    """

    def __init__(self):
        for field in ("ApplicationId", "ApplicationName", "PkgVersion"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("ApplicationId", "ApplicationName", "PkgVersion"):
            setattr(self, field, params.get(field))
class ApplicationAttribute(AbstractModel):
    """Extra fields of an application list entry.

    Attributes (each may be None):
        InstanceCount (int): total number of instances
        RunInstanceCount (int): number of running instances
        GroupCount (int): number of deployment groups under the application
    """

    def __init__(self):
        for field in ("InstanceCount", "RunInstanceCount", "GroupCount"):
            setattr(self, field, None)

    def _deserialize(self, params):
        for field in ("InstanceCount", "RunInstanceCount", "GroupCount"):
            setattr(self, field, params.get(field))
class ApplicationForPage(AbstractModel):
    """Paged application description record.

    Every attribute may be None when the API returns null.

    Attributes:
        ApplicationId (str): application ID.
        ApplicationName (str): application name.
        ApplicationDesc (str): application description.
        ApplicationType (str): application type.
        MicroserviceType (str): microservice type.
        ProgLang (str): programming language.
        CreateTime (str): creation time.
        UpdateTime (str): last update time.
        ApplicationResourceType (str): application resource type.
        ApplicationRuntimeType (str): application runtime type.
        ApigatewayServiceId (str): API Gateway service ID.
    """

    # Serialisable fields, in the order the API documents them.
    _FIELDS = ("ApplicationId", "ApplicationName", "ApplicationDesc",
               "ApplicationType", "MicroserviceType", "ProgLang",
               "CreateTime", "UpdateTime", "ApplicationResourceType",
               "ApplicationRuntimeType", "ApigatewayServiceId")

    def __init__(self):
        for field_name in self._FIELDS:
            setattr(self, field_name, None)

    def _deserialize(self, params):
        """Populate every field from the raw API response dict."""
        for field_name in self._FIELDS:
            setattr(self, field_name, params.get(field_name))
class Cluster(AbstractModel):
"""集群
"""
def __init__(self):
"""
:param ClusterId: 集群ID
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterId: str
:param ClusterName: 集群名称
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterName: str
:param ClusterDesc: 集群描述
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterDesc: str
:param ClusterType: 集群类型
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterType: str
:param VpcId: 集群所属私有网络ID
注意:此字段可能返回 null,表示取不到有效值。
:type VpcId: str
:param ClusterStatus: 集群状态
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterStatus: str
:param ClusterCIDR: 集群CIDR
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterCIDR: str
:param ClusterTotalCpu: 集群总CPU,单位: 核
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterTotalCpu: float
:param ClusterTotalMem: 集群总内存,单位: G
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterTotalMem: float
:param ClusterUsedCpu: 集群已使用CPU,单位: 核
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterUsedCpu: float
:param ClusterUsedMem: 集群已使用内存,单位: G
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterUsedMem: float
:param InstanceCount: 集群机器实例数量
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceCount: int
:param RunInstanceCount: 集群可用的机器实例数量
注意:此字段可能返回 null,表示取不到有效值。
:type RunInstanceCount: int
:param NormalInstanceCount: 集群正常状态的机器实例数量
注意:此字段可能返回 null,表示取不到有效值。
:type NormalInstanceCount: int
:param DeleteFlag: 删除标记:true:可以删除;false:不可删除
注意:此字段可能返回 null,表示取不到有效值。
:type DeleteFlag: bool
:param CreateTime: 创建时间
注意:此字段可能返回 null,表示取不到有效值。
:type CreateTime: str
:param UpdateTime: 更新时间
注意:此字段可能返回 null,表示取不到有效值。
:type UpdateTime: str
:param TsfRegionId: 集群所属TSF地域ID
注意:此字段可能返回 null,表示取不到有效值。
:type TsfRegionId: str
:param TsfRegionName: 集群所属TSF地域名称
注意:此字段可能返回 null,表示取不到有效值。
:type TsfRegionName: str
:param TsfZoneId: 集群所属TSF可用区ID
注意:此字段可能返回 null,表示取不到有效值。
:type TsfZoneId: str
:param TsfZoneName: 集群所属TSF可用区名称
注意:此字段可能返回 null,表示取不到有效值。
:type TsfZoneName: str
:param DeleteFlagReason: 集群不可删除的原因
注意:此字段可能返回 null,表示取不到有效值。
:type DeleteFlagReason: str
:param ClusterLimitCpu: 集群最大CPU限制,单位:核
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterLimitCpu: float
:param ClusterLimitMem: 集群最大内存限制,单位:G
注意:此字段可能返回 null,表示取不到有效值。
:type ClusterLimitMem: float
:param RunServiceInstanceCount: 集群可用的服务实例数量
注意:此字段可能返回 null,表示取不到有效值。
:type RunServiceInstanceCount: int
:param SubnetId: 集群所属子网ID
注意:此字段可能返回 null,表示取不到有效值。
:type SubnetId: str
:param OperationInfo: 返回给前端的控制信息
注意:此字段可能返回 null,表示取不到有效值。
:type OperationInfo: :class:`tencentcloud.tsf.v20180326.models.OperationInfo`
"""
self.ClusterId = None
self.ClusterName = None
self.ClusterDesc = None
self.ClusterType = None
self.VpcId = None
self.ClusterStatus = None
self.ClusterCIDR = None
self.ClusterTotalCpu = None
self.ClusterTotalMem = None
self.ClusterUsedCpu = None
self.ClusterUsedMem = None
self.InstanceCount = None
self.RunInstanceCount = None
self.NormalInstanceCount | |
#!/usr/bin/env python3
#
# Name: mrf_join
# Purpose:
'''Joins multiple MRF files with the same structure into a single one'''
# Created: 11/08/2018
# Updated: 12/14/2018 - Added Z dimension append mode
# Updated: 12/09/2020 - Updated to python3
#
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import io
import sys
import array
import argparse
import glob
# Require Python 3: sys.hexversion >> 24 yields the interpreter's major version
assert sys.hexversion >> 24 >= 0x3, "Python 3 required"
def appendfile(srcname, dstname):
    """Append the full contents of srcname to dstname, reading 1 MiB at a time."""
    chunk_size = 1024 * 1024
    # Open the destination first (matches original behavior: 'ab' creates it
    # even if the source turns out to be missing).
    with open(dstname, 'ab') as dst:
        with open(srcname, 'rb') as src:
            while True:
                block = src.read(chunk_size)
                if not block:
                    break
                dst.write(block)
def mrf_join(argv, forceoffset = None):
    '''Concatenate MRF files.

    Input files are given as a list; the last one is the output.
    Given the data file names, including the extension, which should be the same
    for all files, the .idx and the .mrf extensions are assumed.
    The last file name is the output; it will be created in case it doesn't exist.
    Tiles from inputs are added in the order in which they appear on the command line,
    except the output file, if it exists, which ends up first.

    forceoffset -- when given, every input's tile offsets are shifted by this
    fixed amount instead of the output data file's current size, and the output
    data file is assumed to already exist.
    '''
    assert len(argv) >= 2,\
        "Takes a list of input mrf data files to be concatenated, the last is the output, which will be created if needed"
    ofname, ext = os.path.splitext(argv[-1])
    assert ext not in ('.mrf', '.idx'),\
        "Takes data file names as input, not the .mrf or .idx"
    input_list = argv[:-1]
    for f in input_list:
        assert os.path.splitext(f)[1] == ext,\
            "All input files should have the same extension"
    if not os.path.isfile(ofname + ext): # Create the output using the first file info
        ffname = os.path.splitext(input_list[0])[0]
        # Copy the .mrf file content verbatim; all inputs share the structure
        with open(ffname + '.mrf', "rb") as mrf_file:
            with open(ofname + '.mrf', "wb") as omrf_file:
                omrf_file.write(mrf_file.read())
        # Sparse, zero-filled index of the same size as the first input's index
        with open(ofname + '.idx', "wb") as idx_file:
            idx_file.truncate(os.path.getsize(ffname + '.idx'))
        # Only create the data file if forceoffset is not given
        if forceoffset is None:
            with open(ofname + ext, "wb") as data_file:
                pass
    idxsize = os.path.getsize(ofname + '.idx')
    # Same-size indices imply the same tile grid across all inputs
    for f in input_list:
        assert os.path.getsize(os.path.splitext(f)[0] + '.idx') == idxsize,\
            "All input index files should have the same size {}, {} does not".format(idxsize, f)
    # At this point the output exists, loop over the inputs
    for input_file in argv[:-1]:
        print("Processing {}".format(input_file))
        fname = os.path.splitext(input_file)[0]
        offset = forceoffset
        if offset is None:
            # Offset to adjust start of tiles in this input
            offset = os.path.getsize(ofname + ext)
        # Copy the data file at the end of the current file, in 1MB chunks
        appendfile(input_file, ofname + ext)
        # Now for the hard job, tile by tile, adjust the index and write it
        with open(ofname + '.idx', 'r+b') as ofile:
            with open(fname + '.idx', 'rb') as ifile:
                while True: # Process index files, a 512-byte block at a time
                    # Read as quads (unsigned 64-bit), to avoid individual conversions;
                    # each tile is an (offset, size) pair of quads
                    outidx = array.array('Q')
                    inidx = array.array('Q')
                    try: # Read a chunk of the output
                        outidx.fromfile(ofile, 512 // outidx.itemsize)
                    except EOFError:
                        if len(outidx) == 0:
                            break # This is the exit condition, reached the end of index
                    try: # Same for the input index
                        inidx.fromfile(ifile, 512 // outidx.itemsize)
                    except EOFError:
                        # This has to be the last non-zero chunk, size matches the output file read
                        assert len(inidx) == len(outidx), \
                            "Error reading from index file {}".format(fname + '.idx')
                    # If the input block is all zeros (no tiles), no need to write it
                    if inidx.count(0) == len(outidx):
                        continue
                    # Got some input content, there is work to do
                    if sys.byteorder != 'big': # MRF index is always big endian
                        inidx.byteswap()
                        outidx.byteswap()
                    for i in range(0, len(inidx), 2):
                        if inidx[i + 1] != 0: # Non-zero size means the tile exists
                            outidx[i] = inidx[i] + offset # Add the starting offset to every tile
                            outidx[i + 1] = inidx[i + 1]
                    if sys.byteorder != 'big':
                        outidx.byteswap()
                    # Write it where it was read from
                    ofile.seek(- len(outidx) * outidx.itemsize, io.SEEK_CUR)
                    outidx.tofile(ofile)
# Integer division of x/y, rounded up
def rupdiv(x, y):
    """Return ceil(x / y) for positive integers."""
    return 1 + (x - 1) // y

def getpcount(size, pagesize):
    """Number of pages (tiles) needed to cover `size` with `pagesize`,
    across x, y and the band (c) dimension."""
    return rupdiv(size['x'], pagesize['x'])\
        * rupdiv(size['y'], pagesize['y'])\
        * rupdiv(size['c'], pagesize['c'])

def getmrfinfo(fname):
    """Parse an .mrf metadata (XML) file.

    Returns (info, tree): tree is the parsed ElementTree, info is a dict with:
      size       -- Raster/Size attributes as ints ('c' defaults to 1)
      pagesize   -- tile size; defaults to 512x512 covering all bands
      scale      -- overview scale factor (only when uniform Rsets are present)
      pages      -- page count per resolution level, level 0 first
      totalpages -- sum of `pages` over all levels

    Raises AssertionError when the root tag is not MRF_META or the Rsets
    model is not 'uniform'.
    """
    import xml.etree.ElementTree as ET
    tree = ET.parse(fname)
    root = tree.getroot()
    assert root.tag == "MRF_META", "{} is not an MRF".format(fname)
    info = {}
    info['size'] = { key : int(val) for (key, val) in
                    root.find("./Raster/Size").attrib.items() }
    # getpcount needs a band count; default to a single band when Size has no 'c'
    info['size'].setdefault('c', 1)
    # BUG FIX: an Element with no children is falsy, so the original
    # `if root.find(...)` never took this branch even when PageSize existed;
    # the presence test must compare against None.
    if root.find("./Raster/PageSize") is not None:
        info['pagesize'] = { key : int(val) for (key, val) in
                            root.find("./Raster/PageSize").attrib.items() }
    else:
        info['pagesize'] = {
            'x' : 512,
            'y' : 512,
            'c' : info['size']['c'] if 'c' in info['size'] else 1
        }
    if root.find("./Rsets") is not None:
        assert root.find("./Rsets").get('model') == 'uniform', "Only uniform model rsets are supported"
        try:
            info['scale'] = int(root.find("./Rsets").get('scale'))
        except (TypeError, ValueError):  # scale attribute missing or malformed
            info['scale'] = 2
    # compute the pagecount per level, level 0 always exists;
    # work on a copy so info['size'] is not clobbered by the level loop
    sz = dict(info['size'])
    info['pages'] = [getpcount(sz, info['pagesize'])]
    if 'scale' in info:
        scale = info['scale']
        # This is either 1 or the number of bands
        bandpages = rupdiv(info['size']['c'], info['pagesize']['c'])
        while info['pages'][-1] != bandpages:
            sz['x'] = rupdiv(sz['x'], scale)
            sz['y'] = rupdiv(sz['y'], scale)
            info['pages'].append(getpcount(sz, info['pagesize']))
    info['totalpages'] = sum(info['pages'])
    return info, tree
# Creates the file if it doesn't exist, then truncates it to the given size
def ftruncate(fname, size = 0):
    """Ensure `fname` exists and is exactly `size` bytes long.

    If the file exists it must not be larger than `size` (AssertionError
    otherwise); if it does not exist it is created zero-filled.
    """
    try:
        with open(fname, "r+b") as f:
            assert os.path.getsize(fname) <= size, "Output index file exists and has the wrong size"
            f.truncate(size)
    # BUG FIX: the original bare `except` also swallowed the AssertionError
    # above and silently re-created an oversized file; only a missing file
    # should fall through to creation.
    except FileNotFoundError:
        with open(fname, "wb") as f:
            f.truncate(size)
def write_mrf(tree, zsz, fname):
    """Stamp the z dimension (slice count) into Raster/Size and save the tree
    as an .mrf file at `fname`."""
    root = tree.getroot()
    assert root.tag == "MRF_META", "Invalid tree, not an mrf"
    size_node = root.find("./Raster/Size")
    size_node.set('z', str(zsz))
    tree.write(fname)
def mrf_append(inputs, output, outsize, startidx = 0):
    """Append 2D MRF inputs as consecutive z-slices of a 3D output MRF.

    inputs   -- list of input data file names (same extension as output)
    output   -- output data file name; .mrf/.idx names are derived from it
    outsize  -- total number of z slices in the output
    startidx -- slice number the first input goes to (defaults to 0)
    """
    ofname, ext = os.path.splitext(output)
    assert ext not in ('.mrf', '.idx'),\
        "Takes data file names as arguments"
    for f in inputs:
        assert os.path.splitext(f)[1] == ext,\
            "All input files should have the same extension as the output"
    # Get the template mrf information from the first input.
    # BUG FIX: getmrfinfo() takes a single file name; the original passed a
    # second argument (and read inputs[1] instead of the first input), which
    # raised TypeError at runtime.
    mrfinfo, tree = getmrfinfo(os.path.splitext(inputs[0])[0] + ".mrf")
    # Create the output .mrf if it doesn't exist
    if not os.path.isfile(ofname + ".mrf"):
        write_mrf(tree, outsize, ofname + ".mrf")
    # 16 bytes per tile index entry (offset + size quads)
    inidxsize = 16 * mrfinfo['totalpages']
    outidxsize = outsize * inidxsize
    # Make sure the output index is the right size
    ftruncate(ofname + ".idx", outidxsize)
    if not os.path.isfile(output):
        # Try to create it
        with open(output, "wb") as o:
            pass
    for fn in inputs:
        # Create the output file if not there and get its current size;
        # the current size becomes this slice's tile-offset bias
        with open(output, "a+b") as o:
            dataoffset = os.path.getsize(output)
        fname, iext = os.path.splitext(fn)
        assert iext == ext, \
            "File {} should have extension {}".format(fn, ext)
        assert os.path.getsize(fname + ".idx") == inidxsize, \
            "Index for file {} has invalid size, expected {}".format(fn, inidxsize)
        appendfile(fn, output)
        with open(fname + ".idx", "rb") as inidx:
            with open(ofname + ".idx", "r+b") as outidx:
                # The output index interleaves slices per resolution level
                for level in range(len(mrfinfo['pages'])):
                    outidxoffset = startidx * mrfinfo['pages'][level]
                    if level > 0:
                        outidxoffset += sum(mrfinfo['pages'][0:level]) * outsize
                    outidx.seek(16 * outidxoffset, io.SEEK_SET)
                    # Copy tile by tile, don't write zeros
                    for tnum in range(mrfinfo['pages'][level]):
                        tinfo = array.array('Q')
                        tinfo.fromfile(inidx, 2)
                        # If the input entry is all zeros, skip (leave output sparse)
                        if tinfo.count(0) == len(tinfo):
                            outidx.seek(16, io.SEEK_CUR)
                            continue
                        if sys.byteorder != 'big':  # MRF index is big endian
                            tinfo.byteswap()
                        tinfo[0] += dataoffset
                        if sys.byteorder != 'big':
                            tinfo.byteswap()
                        tinfo.tofile(outidx)
        startidx += 1
def main():
def auto_int(x):
return int(x, 0)
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output",
help = "Output file name, otherwise the last file name is the output")
parser.add_argument("-z", "--zsize", type = auto_int,
help = "The output is a 3rd dimension MRF into which inputs are inserted as slices")
parser.add_argument("-s", "--slice", type = auto_int,
help = "Used only with -z, which is the first target slice, defaults to 0")
parser.add_argument("-f", "--forceoffset", type = auto_int,
help = "Provide an offset to be used when | |
# Source repository: chadtilbury/hindsight
"""
Copyright 2020, CCL Forensics
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import struct
import os
import pathlib
import io
import enum
import datetime
import dataclasses
import types
import typing
import ccl_leveldb
import ccl_v8_value_deserializer
import ccl_blink_value_deserializer
__version__ = "0.2"
__description__ = "Module for reading Chromium IndexedDB LevelDB databases."
__contact__ = "<NAME>"
# TODO: need to go through and ensure that we have endianness right in all cases
# (it should sit behind a switch for integers, fixed for most other stuff)
def _read_le_varint(stream: typing.BinaryIO, *, is_google_32bit=False):
# this only outputs unsigned
i = 0
result = 0
underlying_bytes = []
limit = 5 if is_google_32bit else 10
while i < limit:
raw = stream.read(1)
if len(raw) < 1:
return None
tmp, = raw
underlying_bytes.append(tmp)
result |= ((tmp & 0x7f) << (i * 7))
if (tmp & 0x80) == 0:
break
i += 1
return result, bytes(underlying_bytes)
def read_le_varint(stream: typing.BinaryIO, *, is_google_32bit=False):
x = _read_le_varint(stream, is_google_32bit=is_google_32bit)
if x is None:
return None
else:
return x[0]
def _le_varint_from_bytes(data: bytes):
with io.BytesIO(data) as buff:
return _read_le_varint(buff)
def le_varint_from_bytes(data: bytes):
with io.BytesIO(data) as buff:
return read_le_varint(buff)
class IdbKeyType(enum.IntEnum):
    """Type tag byte at the start of a serialized IndexedDB key
    (see indexed_db_leveldb_coding.cc in Chromium)."""
    Null = 0
    String = 1
    Date = 2
    Number = 3
    Array = 4
    MinKey = 5
    Binary = 6
class IdbKey:
    """A deserialized IndexedDB key.

    `value` holds the decoded Python value (None, str, datetime, float,
    tuple of IdbKey, or bytes depending on `key_type`); `raw_key` keeps the
    original buffer and `_raw_length` the number of bytes this key consumed
    (needed to walk nested Array keys).
    """
    # See: https://github.com/chromium/chromium/blob/master/content/browser/indexed_db/indexed_db_leveldb_coding.cc
    def __init__(self, buffer: bytes):
        self.raw_key = buffer
        # First byte is the type tag; the payload follows
        self.key_type = IdbKeyType(buffer[0])
        raw_key = buffer[1:]
        if self.key_type == IdbKeyType.Null:
            self.value = None
            self._raw_length = 1
        elif self.key_type == IdbKeyType.String:
            # varint char count, then UTF-16-BE text (2 bytes per code unit)
            str_len, varint_raw = _le_varint_from_bytes(raw_key)
            self.value = raw_key[len(varint_raw):len(varint_raw) + str_len * 2].decode("utf-16-be")
            self._raw_length = 1 + len(varint_raw) + str_len * 2
        elif self.key_type == IdbKeyType.Date:
            # little-endian double: milliseconds since the Unix epoch
            ts, = struct.unpack("<d", raw_key[0:8])
            self.value = datetime.datetime(1970, 1, 1) + datetime.timedelta(milliseconds=ts)
            self._raw_length = 9
        elif self.key_type == IdbKeyType.Number:
            self.value = struct.unpack("<d", raw_key[0:8])[0]
            self._raw_length = 9
        elif self.key_type == IdbKeyType.Array:
            # varint element count followed by that many serialized keys
            array_count, varint_raw = _le_varint_from_bytes(raw_key)
            raw_key = raw_key[len(varint_raw):]
            self.value = []
            self._raw_length = 1 + len(varint_raw)
            for i in range(array_count):
                key = IdbKey(raw_key)
                raw_key = raw_key[key._raw_length:]
                self._raw_length += key._raw_length
                self.value.append(key)
            self.value = tuple(self.value)
        elif self.key_type == IdbKeyType.MinKey:
            # TODO: not sure what this actually implies, the code doesn't store a value
            # NOTE(review): the assignments above are dead — this branch always raises.
            self.value = None
            self._raw_length = 1
            raise NotImplementedError()
        elif self.key_type == IdbKeyType.Binary:
            # varint byte count, then the raw bytes
            bin_len, varint_raw = _le_varint_from_bytes(raw_key)
            self.value = raw_key[len(varint_raw):len(varint_raw) + bin_len]
            self._raw_length = 1 + len(varint_raw) + bin_len
        else:
            raise ValueError()  # Shouldn't happen
    def __repr__(self):
        return f"<IdbKey {self.value}>"
    def __str__(self):
        return self.__repr__()
class IndexedDBExternalObjectType(enum.IntEnum):
    """Type tag for data stored outside the leveldb record (blobs/files)."""
    # see: https://github.com/chromium/chromium/blob/master/content/browser/indexed_db/indexed_db_external_object.h
    Blob = 0
    File = 1
    NativeFileSystemHandle = 2
class IndexedDBExternalObject:
    """Metadata for a value stored externally to the record (blob or file):
    blob number, mime type, size, and — for files — name and mtime."""
    # see: https://github.com/chromium/chromium/blob/master/content/browser/indexed_db/indexed_db_backing_store.cc
    # for encoding.
    def __init__(self, object_type: IndexedDBExternalObjectType, blob_number: typing.Optional[int],
                 mime_type: typing.Optional[str], size: typing.Optional[int],
                 file_name: typing.Optional[str], last_modified: typing.Optional[datetime.datetime],
                 native_file_token: typing.Optional[typing.Any]):
        self.object_type = object_type
        self.blob_number = blob_number
        self.mime_type = mime_type
        self.size = size
        self.file_name = file_name
        self.last_modified = last_modified
        self.native_file_token = native_file_token
    @classmethod
    def from_stream(cls, stream: typing.BinaryIO):
        """Parse one external-object record from `stream`.

        Raises NotImplementedError for NativeFileSystemHandle entries.
        """
        blob_type = IndexedDBExternalObjectType(stream.read(1)[0])
        if blob_type in (IndexedDBExternalObjectType.Blob, IndexedDBExternalObjectType.File):
            blob_number = read_le_varint(stream)
            # mime type is UTF-16-BE, length counted in code units
            mime_type_length = read_le_varint(stream)
            mime_type = stream.read(mime_type_length * 2).decode("utf-16-be")
            data_size = read_le_varint(stream)
            if blob_type == IndexedDBExternalObjectType.File:
                file_name_length = read_le_varint(stream)
                file_name = stream.read(file_name_length * 2).decode("utf-16-be")
                # timestamp: microseconds since the Windows epoch (1601-01-01)
                x, x_raw = _read_le_varint(stream)
                last_modified_td = datetime.timedelta(microseconds=x)
                last_modified = datetime.datetime(1601, 1, 1) + last_modified_td
                return cls(blob_type, blob_number, mime_type, data_size, file_name,
                           last_modified, None)
            else:
                return cls(blob_type, blob_number, mime_type, data_size, None, None, None)
        else:
            raise NotImplementedError()
@dataclasses.dataclass(frozen=True)
class DatabaseId:
    """Identity of one IndexedDB database: numeric id, origin and name."""
    dbid_no: int
    origin: str
    name: str
class GlobalMetadata:
    """Global IndexedDB backing-store metadata, parsed from the records whose
    leveldb keys start with the four zero-byte prefix.

    raw_meta_dict maps the raw (bytes) leveldb key to its record object
    (which exposes .key and .value).
    """
    def __init__(self, raw_meta_dict: dict):
        # TODO: more of these meta types if required
        # BUG FIX: the dict is keyed by *bytes* leveldb keys, but the original
        # looked up *str* literals (which can never match), so both fields
        # below were always None; additionally the matched value is a record,
        # so its .value bytes must be passed to the varint decoder (compare
        # dbid_rec.value below).
        self.backing_store_schema_version = None
        if raw_schema_version := raw_meta_dict.get(b"\x00\x00\x00\x00\x00"):
            self.backing_store_schema_version = le_varint_from_bytes(raw_schema_version.value)
        self.max_allocated_db_id = None
        if raw_max_db_id := raw_meta_dict.get(b"\x00\x00\x00\x00\x01"):
            self.max_allocated_db_id = le_varint_from_bytes(raw_max_db_id.value)
        # Database name records: prefix 00 00 00 00 c9, key carries origin+name
        database_ids_raw = (raw_meta_dict[x] for x in raw_meta_dict
                            if x.startswith(b"\x00\x00\x00\x00\xc9"))
        dbids = []
        for dbid_rec in database_ids_raw:
            with io.BytesIO(dbid_rec.key[5:]) as buff:
                # origin and database name are length-prefixed UTF-16-BE
                origin_length = read_le_varint(buff)
                origin = buff.read(origin_length * 2).decode("utf-16-be")
                db_name_length = read_le_varint(buff)
                db_name = buff.read(db_name_length * 2).decode("utf-16-be")
            db_id_no = le_varint_from_bytes(dbid_rec.value)
            dbids.append(DatabaseId(db_id_no, origin, db_name))
        self.db_ids = tuple(dbids)
class DatabaseMetadataType(enum.IntEnum):
    """Metadata record types stored per database (value encoding in comments)."""
    OriginName = 0  # String
    DatabaseName = 1  # String
    IdbVersionString = 2  # String (and obsolete)
    MaximumObjectStoreId = 3  # Int
    IdbVersion = 4  # Varint
    BlobNumberGeneratorCurrentNumber = 5  # Varint
class DatabaseMetadata:
    """Lookup wrapper over per-database metadata records, keyed by
    (db_id, DatabaseMetadataType)."""
    def __init__(self, raw_meta: dict):
        # Read-only view so callers cannot mutate the underlying mapping.
        self._metas = types.MappingProxyType(raw_meta)
    def get_meta(self, db_id: int, meta_type: DatabaseMetadataType) -> typing.Optional[typing.Union[str, int]]:
        """Return the decoded metadata value, or None when absent.

        Only MaximumObjectStoreId is decoded so far; other types raise
        NotImplementedError.
        """
        record = self._metas.get((db_id, meta_type))
        if not record:
            return None
        if meta_type != DatabaseMetadataType.MaximumObjectStoreId:
            # TODO: decode the remaining metadata types
            raise NotImplementedError()
        return le_varint_from_bytes(record.value)
class ObjectStoreMetadataType(enum.IntEnum):
    """Metadata record types stored per object store (value encoding in comments)."""
    StoreName = 0  # String
    KeyPath = 1  # IDBKeyPath
    AutoIncrementFlag = 2  # Bool
    IsEvictable = 3  # Bool (and obsolete apparently)
    LastVersionNumber = 4  # Int
    MaximumAllocatedIndexId = 5  # Int
    HasKeyPathFlag = 6  # Bool (and obsolete apparently)
    KeygeneratorCurrentNumber = 7  # Int
class ObjectStoreMetadata:
    """Lookup wrapper over per-object-store metadata records, keyed by
    (db_id, object_store_id, ObjectStoreMetadataType).

    All metadata fields are prefaced by a 0x00 byte in the leveldb keys.
    """
    def __init__(self, raw_meta: dict):
        # Read-only view so callers cannot mutate the underlying mapping.
        self._metas = types.MappingProxyType(raw_meta)
    def get_meta(self, db_id: int, obj_store_id: int, meta_type: ObjectStoreMetadataType):
        """Return the decoded metadata value, or None when absent.

        Only StoreName is decoded so far; other types raise NotImplementedError.
        """
        record = self._metas.get((db_id, obj_store_id, meta_type))
        if not record:
            return None
        if meta_type != ObjectStoreMetadataType.StoreName:
            # TODO: decode the remaining metadata types
            raise NotImplementedError()
        return record.value.decode("utf-16-be")
class IndexedDbRecord:
    """One record (key + deserialized value) from an object store, with
    back-references for resolving blob content through the owning IndexedDb."""
    def __init__(self, owner: "IndexedDb", db_id: int, obj_store_id: int, key: IdbKey, value: typing.Any):
        self.owner = owner  # the IndexedDb this record was read from
        self.db_id = db_id
        self.obj_store_id = obj_store_id
        self.key = key
        self.value = value
    def resolve_blob_index(self, blob_index: ccl_blink_value_deserializer.BlobIndex) -> IndexedDBExternalObject:
        """Resolve a ccl_blink_value_deserializer.BlobIndex to its IndexedDBExternalObject
        to get metadata (file name, timestamps, etc)"""
        return self.owner.get_blob_info(self.db_id, self.obj_store_id, self.key.raw_key, blob_index.index_id)
    def get_blob_stream(self, blob_index: ccl_blink_value_deserializer.BlobIndex) -> typing.BinaryIO:
        """Resolve a ccl_blink_value_deserializer.BlobIndex to a stream of its content"""
        return self.owner.get_blob(self.db_id, self.obj_store_id, self.key.raw_key, blob_index.index_id)
class IndexedDb:
# This will be informative for a lot of the data below:
# https://github.com/chromium/chromium/blob/master/content/browser/indexed_db/docs/leveldb_coding_scheme.md
# Of note, the first byte of the key defines the length of the db_id, obj_store_id and index_id in bytes:
# 0b xxxyyyzz (x = db_id size - 1, y = obj_store size - 1, z = index_id - 1)
# Currently I just assume that everything falls between 1 and 127 for simplicity as it makes scanning the keys
# lots easier.
    def __init__(self, leveldb_dir: os.PathLike, leveldb_blob_dir: os.PathLike = None):
        """Open the IndexedDB backing store in `leveldb_dir` and eagerly parse
        the global, per-database and per-object-store metadata.

        leveldb_blob_dir -- optional companion blob directory; required only
        when blob content is later requested.
        """
        self._db = ccl_leveldb.RawLevelDb(leveldb_dir)
        self._blob_dir = leveldb_blob_dir
        self.global_metadata = GlobalMetadata(self._get_raw_global_metadata())
        self.database_metadata = DatabaseMetadata(self._get_raw_database_metadata())
        self.object_store_meta = ObjectStoreMetadata(self._get_raw_object_store_metadata())
        # cache for blob path lookups, filled lazily
        self._blob_lookup_cache = {}
@staticmethod
def make_prefix(db_id: int, obj_store_id: int, index_id: int) -> bytes:
def count_bytes(val):
i = 0
while val > 0:
i += 1
val = val >> 8
return i
def yield_le_bytes(val):
if val < 0:
raise ValueError
while val > 0:
yield val & 0xff
val >> 8
db_id_size = count_bytes(db_id)
obj_store_id_size = count_bytes(obj_store_id)
index_id_size = count_bytes(index_id)
if db_id_size > 8 or obj_store_id_size > 8 or index_id_size > 4:
raise ValueError("id sizes are too big")
byte_one = ((db_id_size - 1) << 5) | ((obj_store_id_size - 1) << 2) | index_id_size
return bytes([byte_one, *yield_le_bytes(db_id), *yield_le_bytes(obj_store_id), *yield_le_bytes(index_id)])
    def get_database_metadata(self, db_id: int, meta_type: DatabaseMetadataType):
        # Convenience pass-through to DatabaseMetadata.get_meta.
        return self.database_metadata.get_meta(db_id, meta_type)
    def get_object_store_metadata(self, db_id: int, obj_store_id: int, meta_type: ObjectStoreMetadataType):
        # Convenience pass-through to ObjectStoreMetadata.get_meta.
        return self.object_store_meta.get_meta(db_id, obj_store_id, meta_type)
def _get_raw_global_metadata(self, live_only=True) -> typing.Dict[bytes, ccl_leveldb.Record]:
# Global metadata always has the prefix 0 0 0 0
if not live_only:
raise NotImplementedError("Deleted metadata not implemented yet")
meta = {}
for record in self._db.iterate_records_raw(reverse=True):
if record.key.startswith(b"\x00\x00\x00\x00") and record.state == ccl_leveldb.KeyState.Live:
# we only want live keys and the newest version thereof (highest seq)
if record.key not in meta or meta[record.key].seq < record.seq:
meta[record.key] = record
return meta
def _get_raw_database_metadata(self, live_only=True):
if not live_only:
raise NotImplementedError("Deleted metadata not implemented yet")
db_meta = {}
for db_id in self.global_metadata.db_ids:
if db_id.dbid_no > 0x7f:
raise NotImplementedError("there could be this many dbs, but I don't support it yet")
prefix = bytes([0, db_id.dbid_no, 0, 0])
for record in | |
# Source repository: rcmelendez/devo-lapua-docker
# -*- coding: utf-8 -*-
import socket
import json
import re
import cmd
import os
import math
import time
import readline
import pytz
from .utils.constants import PERIOD_REGEX, NUMBER_REGEX, STATUS, \
MESSAGES, OK_MESSAGE, KO_MESSAGE, \
END_OF_MESSAGE, LAPUA_VERSION
from .utils.config import get_config
from .utils.data import MODELS
__author__ = 'Devo (<NAME>)'
def complete_timezones(text, state):
    """readline completer: the state-th pytz timezone starting with `text`,
    or None once the matches are exhausted."""
    matches = [tz for tz in pytz.all_timezones if tz.startswith(text)]
    try:
        return matches[state]
    except IndexError:
        return None
def default_completer(text, state):
    """Fallback readline completer that never offers any completion."""
    return list()
class CmdTest(cmd.Cmd):
    """Interactive Lapua client shell (cmd.Cmd REPL) talking to the server
    over the socket handed to __init__."""
    # keyword enabling the free-form query path of `create`
    CUSTOM = 'custom'
    # job-config keys that must always be present
    MANDATORY_PARAMS = ["instance", "auth", "domain", "table", "period"]
    LOGGING_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR']
    DEFAULT_NAME_TEMPLATE = '{domain}__{table}__{period}__{time}'
    # aggregation functions accepted inside a custom query's select clause
    ALLOWED_AGG_FUNCTIONS = ['count', 'avg', 'min', 'max', 'hllppcount',
                             'median', 'nnavg', 'nnstddev', 'nnustddev',
                             'nnvar', 'nnuvar', 'percentile25', 'percentile5',
                             'percentile75', 'percentile95', 'stddev',
                             'ustddev', 'sum', 'sum2', 'var', 'uvar']
    # Regexes that pull the individual pieces out of a custom query string.
    # NOTE(review): these patterns are non-raw strings; the escapes work today
    # but raw strings would silence invalid-escape deprecation warnings.
    DOMAIN_REGEX = re.compile('where.*client\s*=\s*[\'\"](\w+)[\'\"]')
    TABLE_REGEX = re.compile('from\s*([\w\.]+)')
    EVENTDATE_REGEX = re.compile('where.*eventdate')
    EVERY_REGEX = re.compile('group.*(every\s*(\d+[smhd]))')
    BY_REGEX = re.compile('group.*by\s*(.*)\s*select')
    COUNT_REGEX = re.compile('(group[\s\S]+select (?:{})\((?:\w*)\)\s+'
                             'as\s+\w+\s*(?:where\s*\w+\s*>\s*(\d+))?$)'.format(
                                 '|'.join(ALLOWED_AGG_FUNCTIONS)))
    def __init__(self, conn):
        """conn -- connected socket-like object used for every exchange with
        the Lapua server."""
        cmd.Cmd.__init__(self)
        self.conn = conn
    # OVERRIDING
    prompt = 'lapua> '  # cmd.Cmd prompt shown to the user

    def emptyline(self):
        # Override cmd.Cmd: an empty line is a no-op instead of repeating
        # the previous command (the base-class default).
        pass
def do_version(self, line):
"""version
Returns deployed Lapua version."""
print('v{}'.format(LAPUA_VERSION))
    def do_exit(self, line):
        """exit
        Close the client."""
        # Tell the server we are leaving, then drop the socket.
        self.conn.send(self._build_msg('quit'))
        self.conn.close()
        print('Connection closed')
        # Returning True stops cmd.Cmd's command loop.
        return True
    def do_quit(self, line):
        """quit
        Close the client."""
        # Alias for exit.
        return self.do_exit(line)
    def do_get(self, job_name):
        """get <job_name>
        Get the configuration for <job_name> job.
        <job_name> must exist."""
        self.conn.send(self._build_msg('get', job_name))
        resp = self._receive()
        # The server may echo names with a .json suffix; normalise it away
        if resp.endswith('.json'):
            resp = resp[:-5]
        if resp == KO_MESSAGE:
            print('Error getting job config. Please, try again.')
        elif resp.startswith(OK_MESSAGE):
            if job_name.endswith('.json'):
                job_name = job_name[:-5]
            # The job config follows the OK marker as a JSON document
            config = resp.replace(OK_MESSAGE, '')
            config = json.loads(config)
            print(json.dumps(config, indent=4))
        else:
            print('Error: {}'.format(resp))
    def do_test(self, job_name):
        """test <job_name>
        Test the configuration for <job_name> job.
        <job_name> must exist."""
        self.conn.send(self._build_msg('test', job_name))
        resp = self._receive()
        if resp == KO_MESSAGE:
            print('Error getting job config. Please, try again.')
        elif resp.startswith(OK_MESSAGE):
            # Test results follow the OK marker as a JSON document
            result = json.loads(resp.replace(OK_MESSAGE, ''))
            print('Results for job {}:'.format(job_name))
            print(json.dumps(result, indent=4))
        else:
            print('Error: {}'.format(resp))
    def do_load(self, job_path):
        """load <job_path>
        Load the configuration from <job_path> and create a new job.
        <job_path> must exist."""
        if not os.path.isfile(job_path):
            print("Error: File {} not found".format(job_path))
            return
        try:
            with open(job_path, 'r') as file_desc:
                config = json.load(file_desc)
        except ValueError:
            print("Error: File {} is not a JSON file".format(job_path))
            return
        # The job name is the file's base name without its extension
        config['name'] = os.path.splitext(os.path.basename(job_path))[0]
        # _handle_creation validates the config and serialises the creation message
        job = self._handle_creation(config)
        if job:
            self.conn.send(job)
            resp = self._receive()
            if not resp == KO_MESSAGE:
                # On success the server answers with the created job's name;
                # offer to start it right away
                print('Job {} successfully created. '
                      'Would you like to start running it?'.format(resp))
                while True:
                    response = self._prompt("(y/n): ", is_input=True)\
                        .lower().strip()
                    if response in ['y', 'n', 'yes', 'no']:
                        if response[0] == 'y':
                            self.do_run(resp)
                        break
                    print('Error: Enter "(y)es" or "(n)o"')
            else:
                print('Error creating job. Please, try again.')
def do_list(self, line):
"""list
Display all running and stopped jobs."""
self.conn.send(self._build_msg('list', line or '*'))
resp = self._receive()
if resp == KO_MESSAGE:
print('Error listing jobs. Please, try again.')
else:
print('Current jobs status:')
status = json.loads(resp)
running_jobs = []
stopped_jobs = []
for k, v in status.items():
k = k.replace('.json', '')
if v == STATUS.RUNNING:
running_jobs.append(k)
else:
stopped_jobs.append(k)
print('\n\tRUNNING[{}]'.format(len(running_jobs)))
for k in running_jobs:
print('\t - {}'.format(k))
print('\n\tSTOPPED[{}]'.format(len(stopped_jobs)))
for k in stopped_jobs:
print('\t - {}'.format(k))
def do_manage(self, line):
"""manage <domain/job> <name> <limit>
Sets <domain/job> limit."""
args = line.split(' ')
if not len(args)==3:
print('Error: missing argument')
return
args[0] = args[0].lower()
if not args[0] in ['domain', 'job']:
print('Error: type must be one of [domain, limit]')
return
if not NUMBER_REGEX.match(args[2]):
print('Error: limit must be a number')
return
data = {'type': args[0], 'name': args[1], 'limit': int(args[2])}
self.conn.send(self._build_msg('manage', data))
resp = self._receive()
if resp == OK_MESSAGE:
print('Changes saved for {} {}.'.format(args[0], args[1]))
else:
print('Error managing {} {}.'.format(args[0], args[1]))
def do_run(self, job_name):
"""run <job_name>
Start running the <job_name> job.
<job_name> must exist and not being currently running."""
self.conn.send(self._build_msg('run', job_name))
resp = self._receive()
if resp.endswith('.json'):
resp = resp[:-5]
while resp != END_OF_MESSAGE:
if resp.startswith(OK_MESSAGE):
print('Job {} successfully started.'.format(
resp.replace(OK_MESSAGE, '')))
elif resp.startswith(KO_MESSAGE):
print('Error running job {}.'.format(
resp.replace(KO_MESSAGE, '')))
else:
print('Error: {}'.format(resp))
resp = self._receive()
def do_stop(self, job_name):
"""stop <job_name>
Stop running the <job_name> job.
<job_name> must exist and not being currently stopped."""
self.conn.send(self._build_msg('stop', job_name))
resp = self._receive()
if resp.endswith('.json'):
resp = resp[:-5]
while resp != END_OF_MESSAGE:
if resp.startswith(OK_MESSAGE):
print('Job {} successfully stopped.'.format(
resp.replace(OK_MESSAGE, '')))
elif resp.startswith(KO_MESSAGE):
print('Error stopping job {}.'.format(
resp.replace(KO_MESSAGE, '')))
else:
print('Error: {}'.format(resp))
resp = self._receive()
def do_delete(self, job_name):
"""delete <job_name>
Delete the <job_name> job.
<job_name> must exist and not being currently running."""
self.conn.send(self._build_msg('delete', job_name))
resp = self._receive()
if resp.endswith('.json'):
resp = resp[:-5]
while resp != END_OF_MESSAGE:
if resp.startswith(OK_MESSAGE):
print('Job {} successfully deleted.'.format(
resp.replace(OK_MESSAGE, '')))
elif resp.startswith(KO_MESSAGE):
print('Error deleting job {}.'.format(
resp.replace(KO_MESSAGE, '')))
else:
print('Error: {}'.format(resp))
resp = self._receive()
    def do_create(self, custom):
        """create [custom]
        Create a new job. A form will ask for all the needed configuration.
        The optional [custom] argument lets you introduce a custom query."""
        # Build the job interactively; custom mode lets the user type a raw query.
        job = self._create_job(custom=custom == CmdTest.CUSTOM)
        if job:
            self.conn.send(job)
            resp = self._receive()
            # The server replies with the job file name; strip the extension.
            if resp.endswith('.json'):
                resp = resp[:-5]
            if not resp.startswith(KO_MESSAGE):
                print('Job {} successfully created. '
                      'Would you like to start running it?'.format(resp))
                # Loop until a recognizable yes/no answer is given.
                while True:
                    response = self._prompt("(y/n): ", is_input=True)\
                        .lower().strip()
                    if response in ['y', 'n', 'yes', 'no']:
                        if response[0] == 'y':
                            self.do_run(resp)
                        break
                    print('Error: Enter "(y)es" or "(n)o"')
            else:
                # Error reply: show the server-provided reason when present,
                # otherwise a generic message.
                resp = resp.replace(KO_MESSAGE, '')
                if resp:
                    print(resp)
                else:
                    print('Error creating job. Please, try again.')
def _format_with_default(self, str, default):
return str.format(' ({})'.format(default) if default else '')
    def _handle_creation(self, obj, edit=False):
        """Confirm *obj* with the user, pick a unique job name for it, and
        return the serialized 'create' message. Returns None when a mandatory
        parameter is missing or the user declines the confirmation."""
        # Refuse to continue when the configuration is incomplete.
        for param in CmdTest.MANDATORY_PARAMS:
            if param not in obj:
                print("Error: Missing mandatory parameter {}".format(param))
                return
        # Show the configuration and ask for explicit yes/no confirmation.
        while True:
            self._prompt("{} job with this configuration?".format(
                'Edit' if edit else 'Create'))
            print(json.dumps(obj, indent=4))
            response = self._prompt("(y/n): ", is_input=True).lower().strip()
            if response in ['y', 'n', 'yes', 'no']:
                break
            print('Error: Enter "(y)es" or "(n)o"')
        if response in ['n', 'no']:
            return
        # Keep asking for a name until one is found that the server does not
        # already know about (a failed 'get' means the name is free).
        while True:
            default_name = CmdTest.DEFAULT_NAME_TEMPLATE.format(
                **obj, time=math.floor(time.time())).replace('.', '_')
            default_name_value = obj.get('name', default_name)
            name = self._prompt(
                self._format_with_default("job name{}: ",
                                          default_name_value),
                is_input=True).lower().strip() or default_name_value
            self.conn.send(self._build_msg('get', '{}.json'.format(name)))
            resp = self._receive()
            if not resp.startswith(OK_MESSAGE):
                obj['name'] = name
                break
            print('Error: job "{}" already exists.'.format(name))
        return self._build_msg("create", obj)
def _create_job(self, default_config={}, custom=False):
while True:
available_instances = list(CONF['api'].keys())
default_value = default_config.get('instance',
', '.join(available_instances))
instance = self._prompt(
self._format_with_default("API instance{}: ", default_value),
is_input=True).lower().strip() or default_value
if instance in available_instances:
break
print('Error: Instance must be one of {}'.format(
available_instances))
while True:
available_auth_types = ['ks', 't']
auth_type = self._prompt(
self._format_with_default("API auth type{}: ",
'[key + secret(ks), token(t)]'),
is_input=True).lower().strip()
if auth_type in available_auth_types:
break
print('Error: API auth type must be one of {}'.format(
available_auth_types))
if auth_type == 't':
while True:
token = self._prompt(
self._format_with_default("Token: ", None),
is_input=True).strip()
if token:
break
print('Error: Token cannot be undefined')
else:
while True:
key = self._prompt(
self._format_with_default("API Key: ", None),
is_input=True).strip()
if key:
break
print('Error: API Key cannot be undefined')
while True:
secret = self._prompt(
self._format_with_default("API Secret: ", None),
is_input=True).strip()
if secret:
break
print('Error: API Secret cannot be undefined')
if custom:
while True:
default_value = default_config.get('query', None)
query = self._prompt(
self._format_with_default("Query{}: ", default_value),
is_input=True).lower().strip() or default_value
if query:
# check domain
domain = CmdTest.DOMAIN_REGEX.findall(query)
if not domain:
print('Error: Domain not found in query')
continue
domain = domain[0]
# check table
table = CmdTest.TABLE_REGEX.findall(query)
if not table:
print('Error: Table not found in query')
continue
table = table[0]
# where eventdate filters
eventdate_filters = CmdTest.EVENTDATE_REGEX.findall(query)
if eventdate_filters:
print('Error: Query can\'t contain eventdate filters. '
'They will be filled in after the job '
'is created')
continue
# check period
every = CmdTest.EVERY_REGEX.findall(query)
if not every:
print('Error: Period not found in query')
continue
period = every[0][1]
if not PERIOD_REGEX.match(period):
print('Error: Period must match format \\d+[dhms]')
continue
# check host
group_by = CmdTest.BY_REGEX.findall(query)
if group_by:
group_by = group_by[0].split(',')
if len(group_by) > 1:
print('Error: Query must contain only '
'one grouping key')
continue
else:
group_by = group_by[0].strip()
else:
group_by = None
# check count
match = CmdTest.COUNT_REGEX.findall(query)
if not match:
print('Error: Query must contain only '
'one of these aggregation functions: {}'.format(
CmdTest.ALLOWED_AGG_FUNCTIONS))
continue
# check threshold
count, threshold = match[0]
if threshold:
if not NUMBER_REGEX.match(threshold):
print('Error: Threshold must be an integer number')
continue
threshold = int(threshold)
else:
threshold = 0
query = query.replace(every[0][0], '')
break
else:
print('Error: Query cannot be undefined')
else:
while True:
default_value = default_config.get('domain', None)
domain = self._prompt(
self._format_with_default("Domain{}: ", default_value),
is_input=True).lower().strip() or default_value
if domain:
break
print('Error: Domain cannot be undefined')
while True:
default_value = default_config.get('table', None)
table = self._prompt(
self._format_with_default("Table{}: ", default_value),
is_input=True).lower().strip() or default_value
if table:
break
print('Error: Table cannot be undefined')
while | |
vals, vecs, stats):
self.eigenvalues = vals
self.eigenvectors = vecs
self.stats = stats
self.transform = LinearTransform(self.eigenvectors.T, pre=-self.mean)
    @property
    def mean(self):
        '''Mean vector of the data, delegated to the underlying stats object.'''
        return self.stats.mean
    @property
    def cov(self):
        '''Covariance matrix of the data, delegated to the underlying stats object.'''
        return self.stats.cov
def reduce(self, N=0, **kwargs):
'''Reduces the number of principal components.
Keyword Arguments (one of the following must be specified):
`num` (integer):
Number of eigenvalues/eigenvectors to retain. The top `num`
eigenvalues will be retained.
`eigs` (list):
A list of indices of eigenvalues/eigenvectors to be retained.
`fraction` (float):
The fraction of total image variance to retain. Eigenvalues
will be retained (starting from greatest to smallest) until
`fraction` of total image variance is retained.
'''
status = spy._status
num = kwargs.get('num', None)
eigs = kwargs.get('eigs', None)
fraction = kwargs.get('fraction', None)
if num is not None:
return PrincipalComponents(self.eigenvalues[:num],
self.eigenvectors[:, :num],
self.stats)
elif eigs is not None:
vals = self.eigenvalues[eigs]
vecs = self.eigenvectors[:, eigs]
return PrincipalComponents(vals, vecs, self.stats)
elif fraction is not None:
if not 0 < fraction <= 1:
raise Exception('fraction must be in range (0,1].')
N = len(self.eigenvalues)
cumsum = np.cumsum(self.eigenvalues)
sum = cumsum[-1]
# Count how many values to retain.
for i in range(N):
if (cumsum[i] / sum) >= fraction:
break
if i == (N - 1):
# No reduction
status.write('No reduction in eigenvectors achieved.')
return self
vals = self.eigenvalues[:i + 1]
vecs = self.eigenvectors[:, :i + 1]
return PrincipalComponents(vals, vecs, self.stats)
else:
raise Exception('Must specify one of the following keywords:'
'`num`, `eigs`, `fraction`.')
def denoise(self, X, **kwargs):
'''Returns a de-noised version of `X`.
Arguments:
`X` (np.ndarray):
Data to be de-noised. Can be a single pixel or an image.
Keyword Arguments (one of the following must be specified):
`num` (integer):
Number of eigenvalues/eigenvectors to use. The top `num`
eigenvalues will be used.
`eigs` (list):
A list of indices of eigenvalues/eigenvectors to be used.
`fraction` (float):
The fraction of total image variance to retain. Eigenvalues
will be included (starting from greatest to smallest) until
`fraction` of total image variance is retained.
Returns denoised image data with same shape as `X`.
Note that calling this method is equivalent to calling the
`get_denoising_transform` method with same keyword and applying the
returned transform to `X`. If you only intend to denoise data with the
same parameters multiple times, then it is more efficient to get the
denoising transform and reuse it, rather than calling this method
multilple times.
'''
f = self.get_denoising_transform(**kwargs)
return f(X)
def get_denoising_transform(self, **kwargs):
'''Returns a function for denoising image data.
Keyword Arguments (one of the following must be specified):
`num` (integer):
Number of eigenvalues/eigenvectors to use. The top `num`
eigenvalues will be used.
`eigs` (list):
A list of indices of eigenvalues/eigenvectors to be used.
`fraction` (float):
The fraction of total image variance to retain. Eigenvalues
will be included (starting from greatest to smallest) until
`fraction` of total image variance is retained.
Returns a callable :class:`~spectral.algorithms.transforms.LinearTransform`
object for denoising image data.
'''
V = self.reduce(self, **kwargs).eigenvectors
f = LinearTransform(V.dot(V.T), pre=-self.mean,
post=self.mean)
return f
def principal_components(image):
    '''
    Calculate Principal Component eigenvalues & eigenvectors for an image.

    Usage::

        pc = principal_components(image)

    Arguments:

        `image` (ndarray, :class:`spectral.Image`, :class:`GaussianStats`):

            An `MxNxB` image

    Returns a :class:`~spectral.algorithms.algorithms.PrincipalComponents`
    object with the following members:

        `eigenvalues`:

            A length B array of eigenvalues

        `eigenvectors`:

            A `BxB` array of normalized eigenvectors

        `stats` (:class:`GaussianStats`):

            A statistics object containing `mean`, `cov`, and `nsamples`.

        `transform`:

            A callable function to transform data to the space of the
            principal components.

        `reduce`:

            A method to reduce the number of eigenvalues.

        `denoise`:

            A callable function to denoise data using a reduced set of
            principal components.

        `get_denoising_transform`:

            A callable function that returns a function for denoising data.
    '''
    if isinstance(image, GaussianStats):
        stats = image
    else:
        stats = calc_stats(image)

    (L, V) = np.linalg.eig(stats.cov)

    # numpy does not guarantee sorted eigenvalues, so sort descending when
    # needed. np.alltrue is deprecated (removed in NumPy 2.0); use np.all.
    if not np.all(np.diff(L) <= 0):
        ii = np.argsort(L)[::-1]
        L = L[ii]
        V = V[:, ii]

    return PrincipalComponents(L, V, stats)
class FisherLinearDiscriminant:
    '''
    An object for storing a data set's linear discriminant data. For `C`
    classes with `B`-dimensional data, the object has the following members:

        `eigenvalues`:

            A length `C-1` array of eigenvalues

        `eigenvectors`:

            A `BxC` array of normalized eigenvectors

        `mean`:

            The length `B` mean vector of the image pixels (from all classes)

        `cov_b`:

            The `BxB` matrix of covariance *between* classes

        `cov_w`:

            The `BxB` matrix of average covariance *within* each class

        `transform`:

            A callable function to transform data to the space of the
            linear discriminant.
    '''
    def __init__(self, vals, vecs, mean, cov_b, cov_w):
        # Store the eigen-decomposition and class covariance statistics.
        self.eigenvalues = vals
        self.eigenvectors = vecs
        self.mean = mean
        self.cov_b = cov_b
        self.cov_w = cov_w
        # Projection onto the discriminant axes, centered on the global mean.
        self.transform = LinearTransform(self.eigenvectors.T, pre=-self.mean)
def linear_discriminant(classes, whiten=True):
    '''
    Solve Fisher's linear discriminant for eigenvalues and eigenvectors.

    Usage: (L, V, Cb, Cw) = linear_discriminant(classes)

    Arguments:

        `classes` (:class:`~spectral.algorithms.TrainingClassSet`):

            The set of `C` classes to discriminate.

        `whiten` (bool, default True):

            When True, scale the eigenvectors so the within-class
            covariance is diagonalized (whitened) in the new space.

    Returns a `FisherLinearDiscriminant` object containing the within/between-
    class covariances, mean vector, and a callable transform to convert data to
    the transform's space.

    This function determines the solution to the generalized eigenvalue problem

            Cb * x = lambda * Cw * x

    Since cov_w is normally invertible, this reduces to

            (inv(Cw) * Cb) * x = lambda * x

    References:

        <NAME>. & <NAME>. Remote Sensing Digital Image Analysis: An
        Introduction. (Springer: Berlin, 1999).
    '''
    # Only C-1 discriminant directions carry information for C classes.
    rank = len(classes) - 1

    classes.calc_stats()

    # Calculate total # of training pixels and total mean
    N = 0
    B = classes.nbands
    K = len(classes)
    mean = np.zeros(B, dtype=np.float64)
    for s in classes:
        N += s.size()
        mean += s.size() * s.stats.mean
    mean /= N

    cov_b = np.zeros((B, B), np.float64)            # cov between classes
    cov_w = np.zeros((B, B), np.float64)            # cov within classes
    for s in classes:
        cov_w += ((s.size() - 1) / float(N - 1)) * s.stats.cov
        m = s.stats.mean - mean
        cov_b += (s.size() / float(N) / (K - 1)) * np.outer(m, m)

    inv_cov_w = np.linalg.inv(cov_w)
    (vals, vecs) = np.linalg.eig(inv_cov_w.dot(cov_b))
    vals = vals[:rank]
    vecs = vecs[:, :rank]

    if whiten:
        # Diagonalize cov_within in the new space
        v = vecs.T.dot(cov_w).dot(vecs)
        d = np.sqrt(np.diag(v) * np.diag(v).conj())
        for i in range(vecs.shape[1]):
            vecs[:, i] /= math.sqrt(d[i].real)

    return FisherLinearDiscriminant(vals.real, vecs.real, mean, cov_b, cov_w)
# Alias for Linear Discriminant Analysis (LDA): `lda` is the same callable
# as `linear_discriminant` above.
lda = linear_discriminant
def log_det(x):
    '''Return the sum of logs of the positive eigenvalues of `x`.

    For a symmetric positive-definite matrix this equals log(det(x)).
    Non-positive eigenvalues are skipped rather than passed to log.
    '''
    return sum(np.log(eigv) for eigv in np.linalg.eigvals(x) if eigv > 0)
class GaussianStats(object):
'''A class for storing Gaussian statistics for a data set.
Statistics stored include:
`mean`:
Mean vector
`cov`:
Covariance matrix
`nsamples`:
Number of samples used in computing the statistics
Several derived statistics are computed on-demand (and cached) and are
available as property attributes. These include:
`inv_cov`:
Inverse of the covariance
`sqrt_cov`:
Matrix square root of covariance: sqrt_cov.dot(sqrt_cov) == cov
`sqrt_inv_cov`:
Matrix square root of the inverse of covariance
`log_det_cov`:
The log of the determinant of the covariance matrix
`principal_components`:
The principal components of the data, based on mean and cov.
'''
    def __init__(self, mean=None, cov=None, nsamples=None, inv_cov=None):
        # NOTE: the `cov` property setter runs first and resets all cached
        # derived statistics (including `_inv_cov`), so `_inv_cov` must be
        # assigned afterwards for a caller-supplied `inv_cov` to be retained.
        self.cov = cov
        self._inv_cov = inv_cov
        self.mean = mean
        self.nsamples = nsamples
    @property
    def cov(self):
        '''Property method returning the covariance matrix.'''
        return self._cov

    @cov.setter
    def cov(self, C):
        # Reassigning the covariance invalidates every cached derived
        # statistic (inverse, square roots, PCs, log-determinant).
        self.reset_derived_stats()
        self._cov = C
    @property
    def inv_cov(self):
        '''Property method returning the inverse of the covariance matrix.'''
        # Computed lazily on first access and cached until the covariance
        # is reassigned (which clears `_inv_cov`).
        if self._inv_cov is None:
            self._inv_cov = np.linalg.inv(self._cov)
        return self._inv_cov
    def reset_derived_stats(self):
        '''Clear all cached statistics derived from the covariance.

        Note this also clears `_cov` itself; the `cov` setter calls this
        method and then immediately re-assigns `_cov`.
        '''
        self._cov = self._inv_cov = None
        self._sqrt_cov = self._sqrt_inv_cov = self._pcs = None
        self._log_det_cov = None
    @property
    def sqrt_cov(self):
        '''Property method returning the matrix square root of the covariance.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == C.
        '''
        # Computed lazily from the cached eigen-decomposition (via the
        # `principal_components` property) and memoized until the
        # covariance is reassigned.
        if self._sqrt_cov is None:
            pcs = self.principal_components
            self._sqrt_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                               pcs.eigenvectors),
                                         symmetric=True)
        return self._sqrt_cov
    @property
    def sqrt_inv_cov(self):
        '''Property method returning matrix square root of inverse of cov.

        If `C` is the covariance, then the returned value is a matrix `S`
        such that S.dot(S) == inv(C).
        '''
        # Same lazy/cached scheme as `sqrt_cov`, but with `inverse=True`
        # so the square root of the inverse covariance is produced.
        if self._sqrt_inv_cov is None:
            pcs = self.principal_components
            self._sqrt_inv_cov = matrix_sqrt(eigs=(pcs.eigenvalues,
                                                   pcs.eigenvectors),
                                             symmetric=True,
                                             inverse=True)
        return self._sqrt_inv_cov
@property
def principal_components(self):
if self._pcs is None:
(evals, evecs) = np.linalg.eigh(self._cov)
self._pcs = PrincipalComponents(evals, evecs, self)
return | |
sentence in summary_sentences:
lcs = _lcs_elements(sentence, reference_sentence)
lcs_union = lcs_union.union(ref_idx for _, ref_idx in lcs)
return lcs_union
def _flatten_and_count_ngrams(sentences, n):
    """
    First flatten a list of sentences, then count ngrams on it.

    >>> s1 = 'the cat sat on the mat'.split()
    >>> s2 = 'the cat on the mat'.split()
    >>> _flatten_and_count_ngrams([s1, s2], 1)
    Counter({('the',): 4, ('cat',): 2, ('on',): 2, ('mat',): 2, ('sat',): 1})

    :param sentences: a list of sentences.
    :param n: N for ngrams.
    :return: Counter.
    """
    flattened = _flatten_sentences(sentences)
    return _count_ngrams(flattened, n)
def rouge_l_summary_level(summary_sentences, reference_sentences, alpha=None):
    """
    Calculate the summary level ROUGE-L.

    :param summary_sentences: a list of sentence.
    :param reference_sentences: a list of sentence.
    :param alpha: weight on the recall.
    :return: a 3-tuple, recall, precision and f1 measure.
    """
    # Unigram counters act as clippers: a word is credited at most as many
    # times as it actually occurs in the summary / reference.
    summary_unigrams = _flatten_and_count_ngrams(summary_sentences, 1)
    reference_unigrams = _flatten_and_count_ngrams(reference_sentences, 1)
    total_lcs_hits = 0
    for reference in reference_sentences:
        # Union of LCS word positions of this reference against all
        # summary sentences.
        lcs_union = _make_lcs_union(summary_sentences, reference)
        for word in lcs_union:
            unigram = (reference[word],)
            # Credit the hit only while both clippers still have budget,
            # then consume one unit from each.
            if (unigram in summary_unigrams and unigram in reference_unigrams
                    and summary_unigrams[unigram] > 0 and reference_unigrams[unigram] > 0):
                summary_unigrams[unigram] -= 1
                reference_unigrams[unigram] -= 1
                total_lcs_hits += 1
    # Recall is normalized by reference length, precision by summary length.
    r_denominator = sum(len(sentence) for sentence in reference_sentences)
    p_denominator = sum(len(sentence) for sentence in summary_sentences)
    return _f1_measure(total_lcs_hits, r_denominator, p_denominator, alpha)
###############################
# ROUGE-W
###############################
def _weight_fn(x, weight=None, inverse=False):
"""
Implement the polynomial weight function described in the paper.
Y = X^weight and
Y = X^(1 / weight) as the inverse.
>>> _weight_fn(2)
2.2973967099940698
>>> _weight_fn(2, weight=2)
4.0
>>> _weight_fn(2, weight=2, inverse=True)
1.4142135623730951
:param x: Union[int, float].
:param weight: float. Must be greater than 1.0. Default is 1.2.
:param inverse: bool. If true, the inverse is computed
:return: float.
:raise ValueError: if weight is not greater than 1.0.
"""
if weight is None:
weight = DEFAULT_WEIGHT_FACTOR
if not weight > 1.0:
raise ValueError('weight must be > 1.0')
if inverse:
weight = 1 / weight
return math.pow(x, weight)
def _wlcs_elements(x, y, weight=None):
    """
    Compute the weighted LCS.

    The weighted LCS rewards consecutive LCS sequence by its length.
    The weight function is so designed that longer consecutive ones get higher score.

    :param x: a sequence.
    :param y: a sequence.
    :param weight: float, the weight factor passed to the weight function.
    :return: the matched LCS element pairs produced by _compute_lcs_elements
        (callers consume them as (x_idx, y_idx) pairs).
    """
    # DP tables keyed by (i, j):
    #   weighted_len      -- weighted LCS score of x[:i] vs y[:j]
    #   consecutive_match -- length of the match run ending exactly at (i, j)
    #   trace             -- backpointers ('d' diagonal match, 'u' up,
    #                        'l' left) for recovering the matched elements
    weighted_len = {}
    consecutive_match = {}
    trace = {}
    m, n = len(x), len(y)
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0 or j == 0:  # Corner case.
                weighted_len[i, j] = 0
                consecutive_match[i, j] = 0
            elif x[i - 1] == y[j - 1]:
                trace[i, j] = 'd'
                k = consecutive_match[i - 1, j - 1]
                # Extending a run of length k to k+1 earns the marginal
                # weight f(k+1) - f(k), which grows with k.
                update = _weight_fn(k + 1, weight) - _weight_fn(k, weight)
                weighted_len[i, j] = weighted_len[i - 1, j - 1] + update
                consecutive_match[i, j] = k + 1
            else:
                consecutive_match[i, j] = 0  # No match
                if weighted_len[i - 1, j] > weighted_len[i, j - 1]:
                    trace[i, j] = 'u'
                    weighted_len[i, j] = weighted_len[i - 1, j]
                else:
                    trace[i, j] = 'l'
                    weighted_len[i, j] = weighted_len[i, j - 1]
    return _compute_lcs_elements(trace, x, y)
def _make_wlcs_union(summary_sentences, reference_sentence):
    """
    Like _make_lcs_union() but use _wlcs_elements() to compute elements for
    each summary-reference sentence pair. The final result is a sorted list
    of word indices of the reference sentence.

    :param summary_sentences:
    :param reference_sentence:
    :return: list.
    """
    union = set()
    for summary_sentence in summary_sentences:
        pairs = _wlcs_elements(summary_sentence, reference_sentence)
        union.update(ref_idx for _, ref_idx in pairs)
    return sorted(union)
def _divide_and_normalize(n, d, weight):
    """
    Divide n by d and normalize the result with the inverse weight function.
    Effectively compute: F^-1 (n / d).

    :param n: float. numerator.
    :param d: float. denominator.
    :return: float.
    """
    ratio = _divide_or_zero(n, d)
    return _weight_fn(ratio, weight=weight, inverse=True)
def rouge_w_summary_level(summary_sentences, reference_sentences, weight=None, alpha=None):
    """
    Compute the summary level ROUGE-W.

    :param summary_sentences: a list of sentences.
    :param reference_sentences: a list of sentences.
    :param weight: float, the weight factor passed to the weight function.
    :param alpha: weight on the recall.
    :return: a 3-tuple, recall, precision and f1 measure.
    """
    total_wlcs_hits = 0
    # unigrams clippers to ensure the score does not exceed ROUGE-1.
    summary_unigrams = _flatten_and_count_ngrams(summary_sentences, 1)
    reference_unigrams = _flatten_and_count_ngrams(reference_sentences, 1)
    # NOTE(review): the recall denominator applies the weight function twice
    # (once per sentence length and once over the sum), while the precision
    # denominator applies it once over the plain sum — confirm this asymmetry
    # is intended by the ROUGE-W definition.
    r_denominator = _weight_fn(
        sum(_weight_fn(len(sentence), weight=weight) for sentence in reference_sentences),
        weight=weight,
    )
    p_denominator = _weight_fn(
        sum(len(sentence) for sentence in summary_sentences),
        weight=weight,
    )
    for reference in reference_sentences:
        # hit_len tracks the length of the current consecutive match run.
        hit_len = 0
        lcs_union = _make_wlcs_union(summary_sentences, reference)
        for word in lcs_union:
            unigram = (reference[word],)
            # Only credit words while both unigram budgets are positive.
            if (unigram in summary_unigrams
                    and unigram in reference_unigrams
                    and summary_unigrams[unigram] > 0
                    and reference_unigrams[unigram] > 0):
                hit_len += 1
                # If this is the last word of the sentence
                # or the next word is not part of this consecutive lcs, reset the hit-len.
                if word == len(reference) - 1 or word + 1 not in lcs_union:
                    total_wlcs_hits += _weight_fn(hit_len, weight=weight)
                    hit_len = 0
                summary_unigrams[unigram] -= 1
                reference_unigrams[unigram] -= 1
    recall = _divide_and_normalize(total_wlcs_hits, r_denominator, weight)
    precision = _divide_and_normalize(total_wlcs_hits, p_denominator, weight)
    f1 = _compute_f1_measure(recall, precision, alpha)
    return RougeScore(recall, precision, f1)
def rouge_w_sentence_level(summary_sentence, reference_sentence, weight=None, alpha=None):
    """
    Compute the sentence level ROUGE-W.

    This is effectively a weighted version of ROUGE-L, delegating to the
    summary-level implementation with singleton sentence lists.

    :param summary_sentence: a sentence produced by the system.
    :param reference_sentence: a sentence as ground truth.
    :param weight: float, the weight factor passed to the weight function.
    :param alpha: weight on the recall.
    :return: a 3-tuple, recall, precision and f1 measure.
    """
    return rouge_w_summary_level(
        [summary_sentence],
        [reference_sentence],
        weight=weight,
        alpha=alpha,
    )
###############################
# ROUGE-S
###############################
def _get_skip_bigrams(words, skip_distance=None):
"""
Compute the skip-bigrams of words as an iterator.
Skip-bigram is a pair of words since we are on the special case of n-grams -- Bigram.
Unlike tradition bigram, skip-bigram allows the two words *not* to be consecutive in the sentence.
There can be other words between them being *skipped*.
The number of words being skipped is controlled by the parameter skip_distance.
Skip distance is the maximum number of words allowed to fall in range formed by the two words
of a skip-bigram. If skip distance is 0, that means *no word* should fall between and thus forms
the conventional bigram.
The skip distance exists to avoid arbitrarily long range of words sitting between a skip-gram
since that makes the count less meaningful.
If the skip_distance is negative, it means *set no limit*. Use that for good reasons.
>>> # Use the default skip distance, which is 4.
>>> list(_get_skip_bigrams('abcd'))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
>>> # Limit the skip distance to 1 -- there can at most be one word between the bigrams.
>>> list(_get_skip_bigrams('abcd', skip_distance=1))
[('a', 'b'), ('a', 'c'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
>>> # Example from the paper.
>>> list(_get_skip_bigrams('police killed the gunman'.split()))
[('police', 'killed'), ('police', 'the'), ('police', 'gunman'), ('killed', 'the'), ('killed', 'gunman'), ('the', 'gunman')]
:param words: a list of tokens.
:param skip_distance: The maximum number of words allowed to be skipped.
:return: a iterator to the skip-bigrams.
"""
if skip_distance is None:
skip_distance = DEFAULT_SKIP_DISTANCE
for i, word in enumerate(words):
for j in range(i + 1, len(words)):
if skip_distance < 0 or j - i - 1 <= skip_distance:
yield word, words[j]
def _count_skip_bigrams(words, skip_distance=None):
    """
    Return a Counter counting the skip-bigrams of words.

    :param words: a list of tokens.
    :param skip_distance: The maximum number of words allowed to be skipped.
    :return: collections.Counter.
    """
    bigrams = _get_skip_bigrams(words, skip_distance)
    return collections.Counter(bigrams)
def rouge_s_sentence_level(summary_sentence, reference_sentence, skip_distance=None, alpha=None):
    """
    Compute sentence level ROUGE-S: overlap of clipped skip-bigram counts
    between the two sentences, folded into recall/precision/F1.

    :param summary_sentence:
    :param reference_sentence:
    :param skip_distance:
    :param alpha:
    :return:
    """
    summary_counts = _count_skip_bigrams(summary_sentence, skip_distance)
    reference_counts = _count_skip_bigrams(reference_sentence, skip_distance)
    overlap = _clipped_ngram_count(summary_counts, reference_counts)
    return _f1_measure(
        numerator=overlap,
        r_denominator=sum(reference_counts.values()),
        p_denominator=sum(summary_counts.values()),
        alpha=alpha,
    )
def rouge_s_summary_level(summary_sentences, reference_sentences, skip_distance=None, alpha=None):
    """
    Compute summary level ROUGE-S by flattening both summaries into single
    token sequences and scoring them at the sentence level.

    :param summary_sentences:
    :param reference_sentences:
    :param skip_distance:
    :param alpha:
    :return:
    """
    flat_summary = _flatten_sentences(summary_sentences)
    flat_reference = _flatten_sentences(reference_sentences)
    return rouge_s_sentence_level(
        summary_sentence=flat_summary,
        reference_sentence=flat_reference,
        skip_distance=skip_distance,
        alpha=alpha,
    )
def rouge_l_score(y_true, y_pred, i2w, alpha=None):
seq_len = len(y_true)
total_r_score = 0
total_p_score = 0
total_f_score = 0
for y_true_seq, y_pred_seq in zip(y_true, y_pred):
#references = [i2w[str(int(index))] for index in y_true_seq]
#hypothesis = [i2w[str(int(index))] for index in y_pred_seq]
references = [i2w[index] for index in y_true_seq]
hypothesis = [i2w[index] for index in y_pred_seq]
references = " ".join(references)
hypothesis = " ".join(hypothesis)
r_score, p_score, f_score = rouge_l_sentence_level(references, hypothesis, alpha)
total_r_score += | |
<filename>mayachemtools/bin/RDKitGenerateMolecularFrameworks.py
#!/bin/env python
#
# File: RDKitGenerateMolecularFrameworks.py
# Author: <NAME> <<EMAIL>>
#
# Copyright (C) 2020 <NAME>. All rights reserved.
#
# The functionality available in this script is implemented using RDKit, an
# open source toolkit for cheminformatics developed by <NAME>.
#
# This file is part of MayaChemTools.
#
# MayaChemTools is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# MayaChemTools is distributed in the hope that it will be useful, but without
# any warranty; without even the implied warranty of merchantability of fitness
# for a particular purpose. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MayaChemTools; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation Inc., 59 Temple Place, Suite 330,
# Boston, MA, 02111-1307, USA.
#
from __future__ import print_function
# Add local python path to the global path and import standard library modules...
import os
import sys; sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), "..", "lib", "Python"))
import time
import re
import csv
# RDKit imports...
try:
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.Scaffolds import MurckoScaffold
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import RDKit module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your RDKit environment and try again.\n\n")
sys.exit(1)
# MayaChemTools imports...
try:
from docopt import docopt
import MiscUtil
import RDKitUtil
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import MayaChemTools module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your MayaChemTools environment and try again.\n\n")
sys.exit(1)
# Name of this script, used in informational messages.
ScriptName = os.path.basename(sys.argv[0])
# Raw docopt command-line options and their processed/validated values,
# populated by RetrieveOptions() and ProcessOptions().
Options = {}
OptionsInfo = {}
def main():
    """Start execution of the script"""

    MiscUtil.PrintInfo("\n%s (RDK v%s; %s): Starting...\n" % (ScriptName, rdBase.rdkitVersion, time.asctime()))

    (StartWallClockTime, StartProcessorTime) = MiscUtil.GetWallClockAndProcessorTime()

    # Retrieve, process, and validate command line arguments and options...
    RetrieveOptions()
    ProcessOptions()

    # Perform actions required by the script...
    GenerateMolecularFrameworks()

    MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName)
    MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(StartWallClockTime, StartProcessorTime))
def GenerateMolecularFrameworks():
    """Generate Bemis-Murcko molecular frameworks.

    Reads molecules from the input file, generates a Murcko scaffold (or a
    generic graph framework) for each valid molecule, optionally removes
    duplicate frameworks (tracked via canonical SMILES), optionally sorts
    frameworks by heavy atom count, and writes results to the output
    file(s)."""

    Infile = OptionsInfo["Infile"]
    Outfile = OptionsInfo["Outfile"]

    UseChirality = OptionsInfo["UseChirality"]
    RemoveDuplicateFrameworks = OptionsInfo["RemoveDuplicateFrameworks"]
    UseGraphFrameworks = OptionsInfo["UseGraphFrameworks"]
    SortFrameworks = OptionsInfo["SortFrameworks"]

    if SortFrameworks:
        # When sorting, frameworks are accumulated in maps keyed by a
        # sequential ID and written out at the end.
        FrameworkMolIDs = []
        FrameworkMolIDToMolMap = {}
        FrameworkMolIDToAtomCountMap = {}

        DuplicateFrameworkMolIDs = []
        DuplicateFrameworkMolIDToMolMap = {}
        DuplicateFrameworkMolIDToAtomCountMap = {}

    DuplicatesOutfile = ""
    if RemoveDuplicateFrameworks:
        DuplicatesOutfile = OptionsInfo["DuplicatesOutfile"]

    # Setup a molecule reader...
    MiscUtil.PrintInfo("\nProcessing file %s..." % Infile)
    Mols = RDKitUtil.ReadMolecules(Infile, **OptionsInfo["InfileParams"])

    # Set up a molecular framework writer...
    Writer = RDKitUtil.MoleculesWriter(Outfile, **OptionsInfo["OutfileParams"])
    if Writer is None:
        MiscUtil.PrintError("Failed to setup a writer for output file %s " % Outfile)

    # Set up a duplicate molecular framework writer...
    if RemoveDuplicateFrameworks:
        DuplicatesWriter = RDKitUtil.MoleculesWriter(DuplicatesOutfile, **OptionsInfo["OutfileParams"])
        # BUG FIX: this previously re-tested `Writer` (already validated
        # above) instead of `DuplicatesWriter`, so a failed setup of the
        # duplicates writer went undetected.
        if DuplicatesWriter is None:
            MiscUtil.PrintError("Failed to setup a writer for duplicates output file %s " % DuplicatesOutfile)

    if RemoveDuplicateFrameworks:
        MiscUtil.PrintInfo("Generating files: %s and %s..." % (Outfile, DuplicatesOutfile))
    else:
        MiscUtil.PrintInfo("Generating file %s..." % Outfile)

    # Process molecules...
    MolCount = 0
    ValidMolCount = 0

    FrameworksCount = 0
    UniqueFrameworksCount = 0
    DuplicateFrameworksCount = 0

    CanonicalSMILESMap = {}
    Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]

    for Mol in Mols:
        MolCount += 1
        if Mol is None:
            continue

        if RDKitUtil.IsMolEmpty(Mol):
            MolName = RDKitUtil.GetMolName(Mol, MolCount)
            MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
            continue
        ValidMolCount += 1

        if UseGraphFrameworks:
            # Generic (graph) framework: atom and bond types abstracted away.
            FrameworksMol = MurckoScaffold.MakeScaffoldGeneric(Mol)
        else:
            FrameworksMol = MurckoScaffold.GetScaffoldForMol(Mol)

        if Compute2DCoords:
            AllChem.Compute2DCoords(FrameworksMol)

        if SortFrameworks:
            HeavyAtomCount = FrameworksMol.GetNumHeavyAtoms()

        FrameworksCount += 1
        if RemoveDuplicateFrameworks:
            # Canonical SMILES is the duplicate-detection key.
            CanonicalSMILES = Chem.MolToSmiles(FrameworksMol, isomericSmiles = UseChirality, canonical = True)
            if CanonicalSMILES in CanonicalSMILESMap:
                DuplicateFrameworksCount += 1
                if SortFrameworks:
                    # Track duplicate frameworks...
                    DuplicateFrameworkMolIDs.append(DuplicateFrameworksCount)
                    DuplicateFrameworkMolIDToMolMap[DuplicateFrameworksCount] = FrameworksMol
                    DuplicateFrameworkMolIDToAtomCountMap[DuplicateFrameworksCount] = HeavyAtomCount
                else:
                    # Write it out...
                    DuplicatesWriter.write(FrameworksMol)
            else:
                UniqueFrameworksCount += 1
                CanonicalSMILESMap[CanonicalSMILES] = CanonicalSMILES
                if SortFrameworks:
                    # Track unique frameworks...
                    FrameworkMolIDs.append(UniqueFrameworksCount)
                    FrameworkMolIDToMolMap[UniqueFrameworksCount] = FrameworksMol
                    FrameworkMolIDToAtomCountMap[UniqueFrameworksCount] = HeavyAtomCount
                else:
                    # Write it out...
                    Writer.write(FrameworksMol)
        elif SortFrameworks:
            # Track for sorting...
            FrameworkMolIDs.append(FrameworksCount)
            FrameworkMolIDToMolMap[FrameworksCount] = FrameworksMol
            FrameworkMolIDToAtomCountMap[FrameworksCount] = HeavyAtomCount
        else:
            # Write it out...
            Writer.write(FrameworksMol)

    if SortFrameworks:
        ReverseOrder = OptionsInfo["DescendingSortOrder"]
        SortAndWriteFrameworks(Writer, FrameworkMolIDs, FrameworkMolIDToMolMap, FrameworkMolIDToAtomCountMap, ReverseOrder)
        if RemoveDuplicateFrameworks:
            SortAndWriteFrameworks(DuplicatesWriter, DuplicateFrameworkMolIDs, DuplicateFrameworkMolIDToMolMap, DuplicateFrameworkMolIDToAtomCountMap, ReverseOrder)

    Writer.close()
    if RemoveDuplicateFrameworks:
        DuplicatesWriter.close()

    MiscUtil.PrintInfo("\nTotal number of molecular frameworks: %d" % FrameworksCount)
    if RemoveDuplicateFrameworks:
        MiscUtil.PrintInfo("Number of unique molecular frameworks: %d" % UniqueFrameworksCount)
        MiscUtil.PrintInfo("Number of duplicate molecular frameworks: %d" % DuplicateFrameworksCount)

    MiscUtil.PrintInfo("\nTotal number of molecules: %d" % MolCount)
    MiscUtil.PrintInfo("Number of valid molecules: %d" % ValidMolCount)
    MiscUtil.PrintInfo("Number of ignored molecules: %d" % (MolCount - ValidMolCount))
def SortAndWriteFrameworks(MolWriter, MolIDs, MolIDToMolMap, MolIDToAtomCountMap, ReverseOrder):
    """Write frameworks to the writer after sorting them by heavy atom count.

    The IDs are ordered by their atom counts (descending when ReverseOrder
    is True) and the corresponding molecules are written in that order.
    """
    for MolID in sorted(MolIDs, key=MolIDToAtomCountMap.__getitem__, reverse=ReverseOrder):
        MolWriter.write(MolIDToMolMap[MolID])
def ProcessOptions():
    """Process and validate command line arguments and options"""
    MiscUtil.PrintInfo("Processing options...")

    # Validate options...
    ValidateOptions()

    # Input/output file names and their format parameters...
    OptionsInfo["Infile"] = Options["--infile"]
    OptionsInfo["InfileParams"] = MiscUtil.ProcessOptionInfileParameters("--infileParams", Options["--infileParams"], Options["--infile"])

    OptionsInfo["Outfile"] = Options["--outfile"]
    OptionsInfo["OutfileParams"] = MiscUtil.ProcessOptionOutfileParameters("--outfileParams", Options["--outfileParams"], Options["--infile"], Options["--outfile"])

    OptionsInfo["Overwrite"] = Options["--overwrite"]

    # Framework generation mode (case-insensitive match)...
    OptionsInfo["Mode"] = Options["--mode"]
    OptionsInfo["UseGraphFrameworks"] = bool(re.match("^GraphFrameworks$", OptionsInfo["Mode"], re.I))

    OptionsInfo["RemoveDuplicates"] = Options["--removeDuplicates"]
    OptionsInfo["RemoveDuplicateFrameworks"] = bool(re.match("^Yes$", OptionsInfo["RemoveDuplicates"], re.I))

    # Setup outfile for writing out duplicates...
    OptionsInfo["DuplicatesOutfile"] = ""
    if OptionsInfo["RemoveDuplicateFrameworks"]:
        FileDir, FileName, FileExt = MiscUtil.ParseFileName(OptionsInfo["Outfile"])
        OptionsInfo["DuplicatesOutfile"] = "%sDuplicates.%s" % (FileName, FileExt)

    # Sorting options...
    OptionsInfo["Sort"] = Options["--sort"]
    OptionsInfo["SortFrameworks"] = bool(re.match("^Yes$", OptionsInfo["Sort"], re.I))

    OptionsInfo["SortOrder"] = Options["--sortOrder"]
    OptionsInfo["DescendingSortOrder"] = bool(re.match("^Descending$", OptionsInfo["SortOrder"], re.I))

    OptionsInfo["UseChirality"] = bool(re.match("^yes$", Options["--useChirality"], re.I))
def RetrieveOptions():
    """Retrieve command line arguments and options"""
    global Options
    # Parse the command line against the module usage text...
    Options = docopt(_docoptUsage_)

    # Set current working directory to the specified directory...
    if Options["--workingdir"]:
        os.chdir(Options["--workingdir"])

    # Print examples and exit when requested...
    if Options.get("--examples"):
        MiscUtil.PrintInfo(MiscUtil.GetExamplesTextFromDocOptText(_docoptUsage_))
        sys.exit(0)
def ValidateOptions():
    """Validate option values"""
    # Input file must exist and carry a supported extension...
    MiscUtil.ValidateOptionFilePath("-i, --infile", Options["--infile"])
    MiscUtil.ValidateOptionFileExt("-i, --infile", Options["--infile"], "sdf sd smi txt csv tsv")

    # Output file checks: extension, overwrite policy, distinct from input...
    MiscUtil.ValidateOptionFileExt("-o, --outfile", Options["--outfile"], "sdf sd smi")
    MiscUtil.ValidateOptionsOutputFileOverwrite("-o, --outfile", Options["--outfile"], "--overwrite", Options["--overwrite"])
    MiscUtil.ValidateOptionsDistinctFileNames("-i, --infile", Options["--infile"], "-o, --outfile", Options["--outfile"])

    # Text-valued options must come from their allowed value sets...
    for OptionLabel, OptionKey, AllowedValues in (
            ("-m, --mode", "--mode", "GraphFrameworks AtomicFrameworks"),
            ("-r, --removeDuplicates", "--removeDuplicates", "yes no"),
            ("-s, --sort", "--sort", "yes no"),
            ("--sortOrder", "--sortOrder", "ascending descending"),
            ("--useChirality", "--useChirality", "yes no")):
        MiscUtil.ValidateOptionTextValue(OptionLabel, Options[OptionKey], AllowedValues)
# Setup a usage string for docopt...
_docoptUsage_ = """
RDKitGenerateMolecularFrameworks.py - Generate Bemis Murcko molecular frameworks
Usage:
RDKitGenerateMolecularFrameworks.py [--infileParams <Name,Value,...>]
[--mode <GraphFrameworks or AtomicFrameworks> ]
[ --outfileParams <Name,Value,...> ] [--overwrite] [--removeDuplicates <yes or no>]
[--sort <yes or no>] [--sortOrder <ascending or descending>]
[--useChirality <yes or no>] [-w <dir>] -i <infile> -o <outfile>
RDKitGenerateMolecularFrameworks.py -h | --help | -e | --examples
Description:
Generate Bemis Murcko [ Ref 133 ] molecular frameworks for molecules. Two types of molecular
frameworks can be generated: Graph or atomic frameworks. The graph molecular framework
is a generic framework. The atom type, hybridization, and bond order is ignored during its
generation. All atoms are set to carbon atoms and all bonds are single bonds. The atom type,
hybridization, and bond order is preserved during generation of atomic molecular frameworks.
The supported input file formats are: SD (.sdf, .sd), SMILES (.smi, .csv, .tsv, .txt)
The supported output file formats are: SD (.sdf, .sd), SMILES (.smi)
Options:
-e, --examples
Print examples.
-h, --help
Print this help message.
-i, --infile <infile>
Input file name.
--infileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for reading
molecules from files. The supported parameter names for different file
formats, along with their default values, are shown below:
SD: removeHydrogens,yes,sanitize,yes,strictParsing,yes
SMILES: smilesColumn,1,smilesNameColumn,2,smilesDelimiter,space,
smilesTitleLine,auto,sanitize,yes
Possible values for smilesDelimiter: space, comma or tab.
-m, --mode <GraphFrameworks or AtomicFrameworks> [default: GraphFrameworks]
Type of molecular frameworks to generate for molecules. Possible values: GraphFrameworks
or AtomicFrameworks. The graph molecular framework is a generic framework. The atom type,
hybridization, and bond order is ignored during its generation. All atoms are set to carbon atoms
and all bonds are single bonds. The atom type, hybridization, and bond order is preserved
during the generation of atomic molecular frameworks.
-o, --outfile <outfile>
Output file name.
--outfileParams <Name,Value,...> [default: auto]
A comma delimited list of parameter name and value pairs for writing
molecules to files. The supported parameter names | |
something else, set the notes attribute.
elif response:
entry.notes = response
# end if
return 1
except Exception as err:
_z_exc("wl_add.py/add_note", err)
# end try
# end function
def add_recurrance(wl_obj, entry):
    """
    Sets the recurrence of a task if the user so chooses.

    Arguments:
    - wl_obj -- the work log object
    - entry -- the log entry object

    Returns: a two-element tuple.  The first element is 1 if
     successful, 0 if the user chooses to abort, or -1 if the user
     chooses to go back.  The second element is the list of recurring
     entry objects (empty unless the user set up recurrences).
    -----------------------------------------------------------------
    """
    try:
        # Run in a loop until a valid response is obtained (function
        # exits immediately then).
        while True:
            # Refresh the entry's attributes.
            _screen_reset(wl_obj, entry, wl_obj.total_entries + 1)
            # Print the appropriate help text.  ("Recurrance" is the
            # literal key used by the help resource; do not "fix" its
            # spelling here.)
            wl_obj.help.print_help(
                wl_obj.show_help, "Task Recurrance", "_ah_recurrance1",
                line_length=wl_obj.line_length)
            # Print navigational instructions.
            wl_resource.print_nav(q=True, b=True)
            # Print the prompt and get the response. (Don't use the
            # yes_no function because the user can also toggle help or
            # exit/go back here.)
            response = io_utils.get_input(
                "Do you want this task to be recurring? [Y/N]:")
            # If the user chose to abort, exit immediately.
            if re.match(r"-q", response, re.I):
                return 0, []
            # If the user chose to go back, also exit.
            elif re.match(r"-b", response, re.I):
                return -1, []
            # If the user chose to toggle help, do that and redisplay.
            elif re.match(r"-h", response, re.I):
                wl_obj.show_help = not(wl_obj.show_help)
                continue
            # If the user chose No, set the attribute and return.
            # NOTE(review): response[0] assumes get_input never returns
            # an empty string here (no must_respond=False was passed) --
            # confirm against io_utils.get_input's default behavior.
            elif response[0].lower() == "n":
                entry.recurring = False
                return 1, []
            # If the user chose Yes, set the attribute and go to the
            # function that determines recurrences.
            elif response[0].lower() == "y":
                entry.recurring = True
                rec_list = _find_recurrances(wl_obj, entry)
                # If recurrences were successfully determined, return
                # the list of entries.
                if rec_list:
                    return 1, rec_list
                # If the user decided not to set recurrences, reset the
                # attributes and return.
                else:
                    entry.recurring = False
                    for key in entry.rec_interval:
                        entry.rec_interval[key] = None
                    # end for
                    return 1, []
                # end if
            # Otherwise print an error message and try again.
            else:
                io_utils.print_status(
                    "Error", f"{response} was not a valid response.",
                    line_length=wl_obj.line_length)
            # end if
        # end while
    except Exception as err:
        _z_exc("wl_add.py/add_recurrance", err)
    # end try
# end function
def add_time(wl_obj, entry, edit=False):
    """
    Sets the time for a task from user input.

    Arguments:
    - wl_obj -- the work log object
    - entry -- the log entry object

    Keyword Arguments:
    - edit -- edit an existing entry (default False)

    Returns: 1 if successful, 0 if the user chooses to abort (or, in
     edit mode, to leave the start time unchanged), or -1 if the user
     chooses to go back.
    -----------------------------------------------------------------
    """
    try:
        # Run in a loop until a valid time is obtained; the function
        # only returns from inside the loop.
        while True:
            # Refresh the entry's attributes.
            if edit:
                _screen_reset_edit(wl_obj, entry)
            else:
                _screen_reset(wl_obj, entry, wl_obj.total_entries + 1)
            # end if
            # Print the appropriate help text.
            wl_obj.help.print_help(
                wl_obj.show_help, "Task Time", "_ah_time",
                line_length=wl_obj.line_length)
            # Print navigational instructions (not shown while editing).
            if not edit:
                wl_resource.print_nav(q=True, b=True)
            # end if
            # Build prompt.
            if edit:
                prompt = (
                    "Please enter a new start time for this task, or press " +
                    "[ENTER] to leave the start time unchanged:")
            else:
                prompt = "Please enter the start time for this task:"
            # end if
            # Print the prompt and get the response.
            response = io_utils.get_input(prompt, must_respond=False)
            # If editing and the user didn't enter anything, return.
            if edit and response == "":
                return 0
            # end if
            # If the user chose to abort, exit immediately.
            if re.match(r"-q", response, re.I):
                return 0
            # If the user chose to go back, also exit.
            elif re.match(r"-b", response, re.I):
                return -1
            # If the user chose to toggle help, do that and redisplay.
            elif re.match(r"-h", response, re.I):
                wl_obj.show_help = not wl_obj.show_help
                continue
            # If the user didn't enter anything (and isn't editing),
            # show an error message and loop back.
            elif response == "":
                io_utils.print_status(
                    "Error", "You did not enter anything.",
                    line_length=wl_obj.line_length)
                continue
            # end if
            # Otherwise parse the string to determine the time.
            entry_time = wl_datetime.parse_time_input(wl_obj, response)
            if entry_time:
                # BUG FIX: the original fell through after printing the
                # parse-failure message, silently setting entry.time to
                # None and returning 1 anyway.  Now the start time is
                # only stored when parsing succeeded; otherwise the
                # loop repeats.
                entry.time = entry_time
                return 1
            # end if
            io_utils.print_status(
                "Error",
                f"{response} could not be interpreted as a valid time.",
                line_length=wl_obj.line_length)
        # end while
    except Exception as err:
        _z_exc("wl_add.py/add_time", err)
    # end try
# end function
def add_title(wl_obj, entry, edit=False):
    """
    Sets the title for a task from user input.

    Arguments:
    - wl_obj -- the work log object
    - entry -- the log entry object

    Keyword Arguments:
    - edit -- edit an existing entry (default False)

    Returns: 1 if successful, or 0 if the user chooses to abort (or,
     in edit mode, to leave the title unchanged).
    -----------------------------------------------------------------
    """
    try:
        # Run in a loop until a valid title is obtained.
        valid = False
        while not valid:
            # Refresh the entry's attributes.
            if edit:
                _screen_reset_edit(wl_obj, entry)
            else:
                _screen_reset(wl_obj, entry, wl_obj.total_entries + 1)
            # end if
            # Print the appropriate help text.
            wl_obj.help.print_help(
                wl_obj.show_help, "Task Title", "_ah_title",
                line_length=wl_obj.line_length)
            # Print navigational instructions (not shown while editing).
            if not edit:
                wl_resource.print_nav(q=True, b=False)
            # end if
            # Build prompt.
            if edit:
                prompt = (
                    "Please enter a new title for this task, or press " +
                    "[ENTER] to leave the title unchanged:")
            else:
                prompt = "Please enter the title for this task:"
            # end if
            # Print the prompt and get the response.
            response = io_utils.get_input(prompt, must_respond=False)
            # If editing and the user didn't enter anything, return.
            if edit and response == "":
                return 0
            # end if
            # If the user chose to abort, exit immediately.
            if re.match(r"-q", response, re.I):
                return 0
            # If the user chose to toggle help, do that.  NOTE: this
            # regex also matches "-help on" / "-help off", so the
            # separate branch the original had for those strings was
            # unreachable dead code and has been removed.
            elif re.match(r"-h", response, re.I):
                wl_obj.show_help = not wl_obj.show_help
                continue
            # The task title can be any string, as long as it's not
            # empty.
            elif response != "":
                # Set the attribute and exit the loop.
                entry.title = response
                valid = True
            else:
                # Print the error message and prompt to try again.
                io_utils.print_status(
                    "Error", "You did not enter anything.",
                    line_length=wl_obj.line_length)
            # end if
        # end while
        return 1
    except Exception as err:
        _z_exc("wl_add.py/add_title", err)
    # end try
# end function
def _check_dates_input(string):
    """
    Tries to convert input from _get_dates into an integer.

    Arguments:
    - string -- the string to check.

    Returns: an integer if successful, else None.  Phrases relative to
     the end of the month yield negative integers ("last ... day" ->
     -1, "next ... day" -> -2, "Nth ... day of the month" -> -N).
    -----------------------------------------------------------------
    """
    try:
        # First try a simple type conversion.  Catch only conversion
        # failures instead of the original blanket Exception.
        try:
            return int(string)
        except (ValueError, TypeError):
            pass
        # end try
        # Next, see if it's a number word.  isinstance with an explicit
        # bool exclusion matches the original `type(x) == int` check
        # (bool is a subclass of int and must not slip through).
        ret_int = wl_resource.cardinal(string)
        if isinstance(ret_int, int) and not isinstance(ret_int, bool):
            return ret_int
        # end if
        # Next, see if it's an ordinal word.
        ret_int = wl_resource.ordinal(string)
        if ret_int:
            return ret_int
        # end if
        # Finally, check to see if it's a phrase relative to the end of
        # the month.
        if re.search(r"day( of the month)?$", string, re.I):
            if re.match(r"last", string, re.I):
                return -1
            elif re.match(r"next", string, re.I):
                return -2
            else:
                # Negative ordinals count back from the end of the
                # month ("3rd to last day" -> -3).
                ret_int = wl_resource.ordinal(re.match(r"\S+", string).group())
                if ret_int:
                    return ret_int * -1
                # end if
            # end if
        # end if
        # If nothing worked, return None.
        return None
    except Exception as err:
        _z_exc("wl_add.py/_check_dates_input", err)
    # end try
# end function
def _find_recurrances(wl_obj, entry):
"""
Allows the user to input how frequently a task will recur.
Arguments:
- wl_obj -- the work log object.
- entry -- the log entry object.
Returns: A list of entry objects, or an empty list if the user
aborts the process.
-----------------------------------------------------------------
"""
try:
# Loop until user aborts or a valid recurrance is set.
valid = False
while not valid:
# Reset the screen.
_screen_reset(wl_obj, entry, wl_obj.total_entries + 1)
# See if the user wants a regular occurrance.
io_utils.print_block(wl_obj.help.help_list["_ah_recurrance2"])
response = io_utils.menu(
["Daily", "Weekly", "Monthly", "Other"], keystroke_list="#",
line_length=wl_obj.line_length)
| |
"""Specific Pysyft exceptions."""
from tblib import Traceback
import traceback
from six import reraise
from typing import Tuple
import syft as sy
from syft.generic.frameworks.types import FrameworkTensor
class DependencyError(Exception):
    """Raised when an optional dependency required for a feature is missing."""

    def __init__(self, package, pypi_alias=None):
        # The PyPI package name defaults to the import name.
        alias = package if pypi_alias is None else pypi_alias
        super().__init__(
            f"The {package} dependency is not installed. If you intend"
            " to use it, please install it at your command line with "
            f"`pip install {alias}`."
        )
class PureFrameworkTensorFoundError(BaseException):
    """Signal that a plain framework (torch/tensorflow) tensor was found.

    Raised during the recursive inspection of a function's arguments to
    short-circuit the recursion: once a FrameworkTensor is encountered,
    the tensors are _probably_ all pure torch/tensorflow and the
    function can be applied natively on this input.

    Attributes:
        expression -- input expression in which the error occurred
        message -- explanation of the error
    """

    pass
class RemoteObjectFoundError(BaseException):
    """Signal that a pointer to a remote tensor was found in the input.

    Used in a context similar to PureFrameworkTensorFoundError, but to
    indicate that the command should be sent elsewhere: the pointer
    carried by the exception gives the location the command must be
    forwarded to.

    Attributes:
        pointer -- the pointer discovered in the input
    """

    def __init__(self, pointer):
        # Keep the pointer so the caller can route the command.
        self.pointer = pointer
class InvalidTensorForRemoteGet(Exception):
    """Raised when a chain of pointer tensors is not provided for `remote_get`."""

    def __init__(self, tensor: object):
        # Message grammar fixed: the original read "You remote get
        # should be called ...".
        message = (
            f"Tensor does not have attribute child. Your remote_get should be "
            f"called on a chain of pointer tensors, instead you called it on {tensor}."
        )
        super().__init__(message)
class WorkerNotFoundException(Exception):
    """Raised when a lookup is attempted for a worker that does not exist."""

    pass
class CompressionNotFoundException(Exception):
    """Raised when an unknown compression/decompression scheme is requested."""

    pass
class CannotRequestObjectAttribute(Exception):
    """Raised when .get() is called on a pointer that targets an attribute
    of another object rather than the object itself."""

    pass
class TensorsNotCollocatedException(Exception):
    """Raised when a command is executed on two tensors which are not
    on the same machine. The goal is to provide as useful input as possible
    to help the user identify which tensors are where so that they can debug
    which one needs to be moved."""

    def __init__(self, tensor_a, tensor_b, attr="a method"):
        # Unwrap wrapper tensors so the message reports the real child.
        if hasattr(tensor_a, "child") and tensor_a.is_wrapper:
            tensor_a = tensor_a.child
        if hasattr(tensor_b, "child") and tensor_b.is_wrapper:
            tensor_b = tensor_b.child

        if isinstance(tensor_a, sy.PointerTensor) and isinstance(tensor_b, sy.PointerTensor):
            # Both tensors are remote, but live on different workers.
            message = (
                "You tried to call "
                + attr
                + " involving two tensors which"
                + " are not on the same machine! One tensor is on "
                + str(tensor_a.location)
                + " while the other is on "
                + str(tensor_b.location)
                + ". Use a combination of .move(), .get(), and/or .send() to co-locate them to the same machine."
            )
        elif isinstance(tensor_a, sy.PointerTensor):
            # Only tensor_a is remote.
            message = (
                "You tried to call "
                + attr
                + " involving two tensors where one tensor is actually located"
                + " on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
                + str(tensor_a.location.id)
                + ") on the other tensor.\n"
                + "\nTensor A: "
                + str(tensor_a)
                + "\nTensor B: "
                + str(tensor_b)
            )
        elif isinstance(tensor_b, sy.PointerTensor):
            # Only tensor_b is remote.
            message = (
                "You tried to call "
                + attr
                + " involving two tensors where one tensor is actually located"
                + " on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
                + str(tensor_b.location.id)
                + ") on the other tensor.\n"
                + "\nTensor A: "
                + str(tensor_a)
                + "\nTensor B: "
                + str(tensor_b)
            )
        else:
            # Neither tensor is a pointer; give generic advice.
            # BUG FIX: the original concatenation was missing two
            # spaces, producing "machine.Try" and "sameworker".
            message = (
                "You tried to call "
                + attr
                + " involving two tensors which are not on the same machine. "
                + "Try calling .send(), .move(), and/or .get() on these tensors to get them to the same "
                + "worker before calling methods that involve them working together."
            )

        super().__init__(message)
        # Keep the tensors for programmatic inspection by the caller.
        self.tensor_a = tensor_a
        self.tensor_b = tensor_b
class ResponseSignatureError(Exception):
    """Raised when the return of a hooked function is not correctly predicted
    (when defining in advance ids for results)
    """

    def __init__(self, ids_generated=None):
        # Ids that were pre-allocated for the (mispredicted) results.
        self.ids_generated = ids_generated

    def get_attributes(self):
        """Return the extra attributes needed to report this error remotely."""
        return {"ids_generated": self.ids_generated}

    @staticmethod
    def simplify(worker: "sy.workers.AbstractWorker", e):
        """Serialize an exception (type name, traceback string, attributes)
        so it can be forwarded to another worker."""
        exc_type = type(e)
        # Render the traceback as a plain string so it can be shipped.
        tb_string = "Traceback (most recent call last):\n" + "".join(
            traceback.format_tb(e.__traceback__)
        )
        # Some exceptions carry extra attributes worth forwarding.
        try:
            extra = e.get_attributes()
        except AttributeError:
            extra = {}
        return exc_type.__name__, tb_string, sy.serde._simplify(worker, extra)

    @staticmethod
    def detail(worker: "sy.workers.AbstractWorker", error_tuple: Tuple[str, str, dict]):
        """Rebuild and re-raise an exception forwarded by another worker."""
        name, tb_string, extra = error_tuple
        name = name.decode("utf-8")
        tb_string = tb_string.decode("utf-8")
        extra = sy.serde._detail(worker, extra)
        # Rebuild the traceback object from its string form.
        tb = Traceback.from_string(tb_string)
        # Only re-raise exception types that syft itself defines.
        if name not in dir(sy.exceptions):
            raise ValueError(f"Invalid Exception returned:\n{tb_string}")
        error_type = getattr(sy.exceptions, name)
        error = error_type()
        # Restore any forwarded special attributes.
        for attr_name, attr in extra.items():
            setattr(error, attr_name, attr)
        reraise(error_type, error, tb.as_traceback())
class GetNotPermittedError(Exception):
    """Raised when calling get on a pointer to a tensor which does not allow
    get to be called on it. This can happen do to sensitivity being too high"""

    @staticmethod
    def simplify(worker: "sy.workers.AbstractWorker", e):
        """Serialize an exception (type name, traceback string, attributes)
        so it can be forwarded to another worker."""
        exc_type = type(e)
        # Render the traceback as a plain string so it can be shipped.
        tb_string = "Traceback (most recent call last):\n" + "".join(
            traceback.format_tb(e.__traceback__)
        )
        # Some exceptions carry extra attributes worth forwarding.
        try:
            extra = e.get_attributes()
        except AttributeError:
            extra = {}
        return exc_type.__name__, tb_string, sy.serde._simplify(worker, extra)

    @staticmethod
    def detail(worker: "sy.workers.AbstractWorker", error_tuple: Tuple[str, str, dict]):
        """Rebuild and re-raise an exception forwarded by another worker."""
        name, tb_string, extra = error_tuple
        name = name.decode("utf-8")
        tb_string = tb_string.decode("utf-8")
        extra = sy.serde._detail(worker, extra)
        # Rebuild the traceback object from its string form.
        tb = Traceback.from_string(tb_string)
        # Only re-raise exception types that syft itself defines.
        if name not in dir(sy.exceptions):
            raise ValueError(f"Invalid Exception returned:\n{tb_string}")
        error_type = getattr(sy.exceptions, name)
        error = error_type()
        # Restore any forwarded special attributes.
        for attr_name, attr in extra.items():
            setattr(error, attr_name, attr)
        reraise(error_type, error, tb.as_traceback())
class IdNotUniqueError(Exception):
    """Raised by the ID Provider when an id being set has already been
    generated."""

    pass
class PlanCommandUnknownError(Exception):
    """Raised when an unknown plan command execution is requested."""

    def __init__(self, command_name: object):
        # Name the missing command in the message for easier debugging.
        super().__init__(f"Command {command_name} is not implemented.")
class ObjectNotFoundError(Exception):
    """Raised when object with given object id is not found on worker

    Attributes:
        obj_id -- id of the object with which the interaction is attempted
        worker -- virtual worker on which the interaction is attempted
    """

    def __init__(self, obj_id, worker):
        # Build a verbose, user-facing explanation.  BUG FIX: three of
        # the implicitly-concatenated string literals below were missing
        # their separating spaces, producing "they'reon",
        # "dictionaryon" and "calls.get()" in the message.
        message = ""
        message += 'Object "' + str(obj_id) + '" not found on worker!!! '
        message += (
            "You just tried to interact with an object ID:"
            + str(obj_id)
            + " on "
            + str(worker)
            + " which does not exist!!! "
        )
        message += (
            "Use .send() and .get() on all your tensors to make sure they're "
            "on the same machines. "
            "If you think this tensor does exist, check the ._objects dictionary "
            "on the worker and see for yourself!!! "
            "The most common reason this error happens is because someone calls "
            ".get() on the object's pointer without realizing it (which deletes "
            "the remote object and sends it to the pointer). Check your code to "
            "make sure you haven't already called .get() on this pointer!!!"
        )
        super().__init__(message)
class InvalidProtocolFileError(Exception):
    """Raised when a PySyft protocol file cannot be loaded."""

    pass
class UndefinedProtocolTypeError(Exception):
    """Raised when serializing a type that the protocol file does not
    define."""

    pass
class UndefinedProtocolTypePropertyError(Exception):
    """Raised when requesting a protocol type property that the protocol
    file does not define."""

    pass
def route_method_exception(exception, self, args, kwargs):
try:
if self.is_wrapper:
if isinstance(self.child, sy.PointerTensor):
if len(args) > 0:
if not args[0].is_wrapper:
return TensorsNotCollocatedException(self, args[0])
elif isinstance(args[0].child, sy.PointerTensor):
if self.location != args[0].child.location:
return TensorsNotCollocatedException(self, args[0])
# if self is a normal tensor
elif isinstance(self, FrameworkTensor):
if len(args) > | |
from numpy import mean
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plot
import matplotlib.mlab as mlab
import matplotlib.pylab as lab
import matplotlib.patches as patches
import matplotlib.ticker as plticker
from matplotlib import rcParams
from matplotlib import gridspec
from matplotlib import cm
import sys
import time
rcParams['font.sans-serif'] = 'Arial'
# Command-line arguments (all positional):
infile = sys.argv[1] # CSV of windowed similarity values to go over
window = int(sys.argv[2]) # window size that was used to build infile
slide = int(sys.argv[3]) # slide (step) size that was used
bottompanel = int(sys.argv[4]) # this is how often to plot points on the bottom panel
scaffold_file = sys.argv[5] # tab-separated file with scaffold name and length
color_scheme = sys.argv[6] # matplotlib colormap name used for similarity shading
outdir = sys.argv[7] # directory where the pdf/text output is written
# this is to process the scaffold file and output the shading correctly
def scale_dict(info_file):
    """Map each scaffold name to its cumulative start offset.

    Reads a tab-separated file of "scaffold<TAB>length" lines and
    returns a dict mapping each scaffold to the summed lengths of all
    scaffolds listed before it (the first scaffold maps to 0).  Used
    to translate per-scaffold coordinates into genome-wide plot
    coordinates.
    """
    info_dict = {}
    offset = 0
    # 'with' guarantees the handle is closed even on a malformed line
    # (the original left the file open if an error was raised).
    with open(info_file, "r") as info_read:
        for line in info_read:
            fields = line.rstrip().split("\t")
            info_dict[fields[0]] = offset
            offset += int(fields[1])
    return info_dict
# processing the scaffold information:
# shader maps each scaffold to its cumulative start offset; maxx is the
# total length (sum of column 1 of the scaffold file).
shader = scale_dict(scaffold_file)
maxx = sum(pd.read_csv(scaffold_file, sep="\t", header=None)[1])
print(maxx)
midwin = window/2
outfiley = open(outdir+"/connected_fragments.txt", "w")
b = pd.read_csv(infile)
# Columns 4 onward are pairwise comparisons named "strainA;strainB".
comparisons = list(b.columns[3:])
strains = []
ncomps = len(comparisons)
# Collect the ordered list of strain names; the second strain of a
# comparison is only appended for the very last comparison.
for comp in comparisons:
    strainies = comp.split(";")
    if (strainies[0] in strains)==False:
        strains.append(strainies[0])
    if comp == comparisons[ncomps-1]:
        strains.append(strainies[1])
nstrains = len(strains)
print("nstrains", nstrains)
# Grid dimension for the figure layout (one shrinking panel per strain).
grids = sum(range(nstrains+4))+25
print("grids", grids)
fig = plot.figure(figsize=(grids, grids), dpi=10000)
gs = gridspec.GridSpec(grids, grids)
# Panel bookkeeping: begger/ender delimit the current panel's grid rows.
begger = 1
mover = nstrains-1
ender = begger + mover
lastfirst = None
boxcount = pd.Series()  # NOTE(review): appears unused in the visible code
## let's do colors
## have to normalize the data set first so that they're continuous
norm = matplotlib.colors.Normalize(vmin=75, vmax=100)
m = cm.ScalarMappable(norm=norm, cmap=color_scheme)
for comparison in comparisons:
print("%s: %s"%("processing", comparison))
compare = comparison.split(";")
compcol = b[comparison]
first = compare[0]
if lastfirst == None:
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
tickies = list(pd.Series(range(len(remains)))+0.5)
# label offset needs to be .2 to worky well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
ax.title.set_fontsize(80) # this is very awkward, but it the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
# this are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
else:
if first == lastfirst:
ncomp = ncomp- 1
pass
else:
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
begger = ender + 4
mover = mover - 1
ender = begger + mover
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
tickies = list(pd.Series(range(len(remains)))+0.5)
# label offset needs to be .2 to worky well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
ax.title.set_fontsize(80) # this is very awkward, but it the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
# this are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
second = compare[1]
############################## 75 ####################################
asel = compcol[(compcol >= 75) & (compcol < 83)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 75-83% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
for single in singletons:
medwinpos = b.loc[single,'total_med']
begpos = medwinpos-midwin
endpos = medwinpos+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
patchy = patches.Rectangle((begpos, ncomp),endpos-begpos, 1)
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 83 ####################################
asel = compcol[(compcol >= 83) & (compcol < 90)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 83-90% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
for single in singletons:
medwinpos = b.loc[single,'total_med']
begpos = medwinpos-midwin
endpos = medwinpos+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
patchy = patches.Rectangle((begpos, ncomp),endpos-begpos, 1)
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 90 #############################################
asel = compcol[(compcol >= 90) & (compcol < 95)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 90-95% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(90)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
for single in singletons:
medwinpos = b.loc[single,'total_med']
begpos = medwinpos-midwin
endpos = medwinpos+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(90)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 95 #############################################
# these first, then the ones that are the same
asel = compcol[compcol >= 95]
samies = list(asel.index)
singletons = set(samies)
# let's plot these high values first
# start with a boolean approach to identify long fragments of similarity
if len(samies) > 0:
print("processing 95% similar and higher")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
keep_score = set([])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
########## this is for frequency plotting later
testbeg = begpos%bottompanel
if testbeg == 0:
shift = 0
else:
shift = bottompanel-testbeg
actualbeg = begpos + shift
#end
testend = endpos%bottompanel
shift = bottompanel-testend
actualend = endpos + shift
# now get the stuff
stellar = [i for i in range(actualbeg, actualend, bottompanel) | |
<reponame>galletitaoreo/PythonPentest
# -*- encoding: utf-8 -*-
'''
Author:<NAME>
email:<EMAIL>
'''
import sys
import os
import nmap
import socket
import getopt, sys
import platform
import urllib, urllib2, json
import ftplib
import time
import utils
import re
import argparse
try:
from bs4 import BeautifulSoup
except Exception as e:
print("pip install beautifulsoup4")
exit(1)
try:
import requests
except:
print "Request library not found, please install it before proceeding\n"
sys.exit()
#datetime
from datetime import datetime
from pywebfuzz import fuzzdb
#for request password
import getpass
from Log import Log
from NexposeFrameWork import NexposeFrameWork
from MetaSploitFrameWork import MetaSploitFrameWork
from UtilDNS import UtilDNS
from ShodanSearch import ShodanSearch
from SSHConnection import SSHConnection
from FTPConnection import FTPConnection
from Checker import Checker
from Scraping import Scraping
from InfoLocation import InfoLocation
from ScanningNMAP import ScanningNMAP
from HTTPScan import HTTPScan
from CheckOpenSslVulnerable import CheckOpenSslVulnerable
from CheckFTPVulnerable import CheckFTPVulnerable
from NmapScanner import NmapScanner
from ExtractMails import ExtractMails
from Search import Search
from CheckVuln_SQL_XSS_LFI import CheckVuln_SQL_XSS_LFI
from IdentifyServer import IdentifyServer
from CheckHeadersXSS import CheckHeadersXSS
from CheckCookies import CheckCookies
#from ScannerScapy import ScannerScapy
#nexpose
import pynexposeHttps
import builtwith
def author():
    """Return the multi-line ASCII banner crediting the tool's author.

    Used once at startup (printed from __main__). The returned text is a
    raw triple-quoted literal; the <NAME> placeholder is a redaction
    artifact from the published source.
    """
    return """
+---------------------------------------------------------+
|Author: <NAME> - @jmortegac |
|Web site: http://about.me/jmortegac |
|Project: https://github.com/jmortega/python-pentesting |
+---------------------------------------------------------+
"""
def showMenu():
print "[0]-->EXIT"
print "[1]-->Check Open Ports[80,8080 by default]"
print "[2]-->Port Scanning[It will scan over ports parameter,by default it will scan 80 and 8080]"
print "[3]-->Nmap Scanning Advanced"
print "[4]-->Check Option methods"
print "[5]-->Check DNS Servers info"
print "[6]-->Check Host info from Shodan Service"
print "[7]-->NMAP Port Scanning"
print "[8]-->Host Info by Socket Call"
print "[9]-->GeoLocation Host Info"
print "[10]-->Scraping for images and pdf & obtain metadata"
print "[11]-->Get Headers info"
print "[12]-->Get SSH user/password Brute Force[Requires port 22 opened]"
print "[13]-->Get FTP Anonymous access[Requires port 21 opened]"
print "[14]-->MetaSploitFrameWork"
print "[15]-->NexposeFramework"
print "[16]-->HTTP SCAN[Requires port 80 opened]"
print "[17]-->Check HeartBleed OpenSSL vulnerability[Requires port 443 opened]"
print "[18]-->Check FTP Server Buffer Overflow Vulnerability[Requires port 21 opened]"
print "[19]-->Check Vulnerabilities SQL,XSS,LFI in domain"
print "[20]-->Check Domains and obtain metadata[mails, hosts, servers,urls]"
print "[21]-->Check open ports with scapy"
print "[22]-->Check website libraries"
print "[23]-->Identify web server"
print "[24]-->Check headers & Clickjacking"
print "[25]-->Check Cookies from website"
option = raw_input ("Choose an option:")
return option
if __name__ == "__main__":
print(author())
parser = argparse.ArgumentParser(description='Pentesting-tool')
# Main arguments
parser.add_argument("-target", dest="target", help="target IP / domain", required=None)
parser.add_argument("-ports", dest="ports", help="Please, specify the target port(s) separated by comma[80,8080 by default]", default = "80,8080")
parser.add_argument("-proxy", dest="proxy", help="Proxy[IP:PORT]", required=None)
parsed_args = parser.parse_args()
shodanSearch = ShodanSearch()
dnsResolver = UtilDNS()
sshConnection = SSHConnection()
checker = Checker()
scraping = Scraping()
scanningNMAP = ScanningNMAP()
infoLocation = InfoLocation()
httpScan = HTTPScan()
checkOpenSslVulnerable = CheckOpenSslVulnerable()
checkFtpVulnerable = CheckFTPVulnerable()
extractMails = ExtractMails()
checkVuln_SQL_XSS_LFI = CheckVuln_SQL_XSS_LFI()
#scannerScapy = ScannerScapy()
#default port list
ip = ""
hostname = ""
option = ""
ip_server_metasploit = ""
port_server_metasploit = ""
user_metasploit = ""
password_metasploit = ""
shodan_results = []
shodan_visited = []
pyconnect = 0
if parsed_args.target == None:
while (hostname ==""):
hostname = raw_input ("[*] Introduce IP or name domain:")
else:
hostname = parsed_args.target
print("\n [*] Obtain Ip address from host name")
print "-----------------------------------"
ip = socket.gethostbyname(hostname)
print '[*] The IP address of ', hostname, 'is', ip
while option != 0:
print "\n [*] IP/Hostname: "+ ip + " / " + hostname
option = showMenu()
if option == "0":
sys.exit(1);
if option == "1":
f = open('logOpenPorts.txt', 'a')
sys.stdout = Log(sys.stdout, f)
portlist = parsed_args.ports.split(',')
checker.checkOpenPorts(ip,hostname,portlist)
if option == "2":
f = open('logPortScanning.txt', 'a')
sys.stdout = Log(sys.stdout, f)
print "[*] Scanning ports "+ parsed_args.ports + "..."
portlist = parsed_args.ports.split(',')
for port in portlist:
NmapScanner().nmapScan(ip, port)
NmapScanner().nmapScanJSONGenerate(ip, parsed_args.ports)
if option == "3":
f = open('logNmapScanningAdvanced.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checker.checkNmapOptions(ip)
if option == "4":
f = open('logOptionMethods.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checker.checkOptionMethods(hostname)
if option == "5":
f = open('logDnsInfo.txt', 'a')
sys.stdout = Log(sys.stdout, f)
dnsResolver.checkDNSInfo(ip,hostname)
if option == "6":
f = open('logHostInfo.txt', 'a')
sys.stdout = Log(sys.stdout, f)
shodanSearch.obtain_host_info2(ip)
shodanSearch.obtain_host_info2(hostname)
if option == "7":
f = open('logNScanningNmap.txt', 'a')
sys.stdout = Log(sys.stdout, f)
portlist = parsed_args.ports
scanningNMAP.scanNMAP(ip,portlist)
if platform.system() == "Linux":
scanningNMAP.scanningNmapUnix(ip,hostname,portlist)
if platform.system() == "Windows":
scanningNMAP.scanningNmapWindows(ip,hostname,portlist)
if option == "8":
f = open('logHostByName.txt', 'a')
sys.stdout = Log(sys.stdout, f)
print socket.gethostbyname(hostname)
if option == "9":
f = open('logGeoLocationInfo.txt', 'a')
sys.stdout = Log(sys.stdout, f)
infoLocation.printRecord(ip)
infoLocation.printRecord(hostname)
infoLocation.geoInfo(hostname,ip)
if option == "10":
f = open('logScraping.txt', 'a')
sys.stdout = Log(sys.stdout, f)
print '\njpg images'
print '--------------'
scraping.getImgFromUrl(hostname, 'jpg')
print '\npng images'
print '--------------'
scraping.getImgFromUrl(hostname, 'png')
print '\ngif images'
print '--------------'
scraping.getImgFromUrl(hostname, 'gif')
scraping.scrapingImagesPdf(hostname)
scraping.scrapingImagesPdf(ip)
scraping.scrapingBeautifulSoup(hostname)
scraping.scrapingBeautifulSoup(ip)
if option == "11":
f = open('logCheckHeaders.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checker.checkHeadersInfoByIp(ip)
checker.checkHeadersInfoByHostName(hostname)
if option == "12":
f = open('logSSHBruteForce.txt', 'a')
sys.stdout = Log(sys.stdout, f)
sshConnection.SSHBruteForce(hostname)
if option == "13":
f = open('logFTP.txt', 'a')
sys.stdout = Log(sys.stdout, f)
ftpConnection = FTPConnection(hostname)
ftpConnection.ftpConnectionAnonymous()
if option == "14":
while (ip_server_metasploit == ""):
ip_server_metasploit = raw_input ("[*] Introduce IP server where MetaSploit is running:")
while (port_server_metasploit == ""):
port_server_metasploit = raw_input ("[*] Introduce Port server where MetaSploit is running:")
while (user_metasploit == ""):
user_metasploit = raw_input ("[*] Introduce user for MetaSploit:")
while (password_metasploit == ""):
password_metasploit = getpass.getpass ("[*] Introduce password for MetaSploit:")
try:
f = open('metaSploit_log.txt', 'w')
sys.stdout = Log(sys.stdout, f)
metaSploitFrameWork = MetaSploitFrameWork(port_server_metasploit,ip_server_metasploit,ip,user_metasploit,password_metasploit)
metaSploitFrameWork.scanMetaSploitFrameWork()
except Exception,e:
print "Error to connecting with MetaSploit Server"
print e
pass
if option == "15":
if pyconnect == 0:
serveraddr_nexpose = ""
port_server_nexpose = ""
user_nexpose = ""
password_nexpose = ""
while (serveraddr_nexpose == ""):
serveraddr_nexpose = raw_input ("[*] Introduce IP server where Nexpose is running:")
while (port_server_nexpose == ""):
port_server_nexpose = raw_input ("[*] Introduce Port server where Nexpose is running:")
while (user_nexpose == ""):
user_nexpose = raw_input ("[*] Introduce user for Nexpose:")
while (password_nexpose == ""):
password_nexpose = getpass.getpass ("[*] Introduce password for Nexpose:")
try:
if pyconnect == 0:
pynexposeHttps = pynexposeHttps.NeXposeServer(serveraddr_nexpose, port_server_nexpose, user_nexpose, password_nexpose)
pyconnect = 1
except Exception,e:
pyconnect = 0
print e.message
print "Error to connecting with NeXposeServer"
pass
try:
f = open('nexpose_log.txt', 'w')
sys.stdout = Log(sys.stdout, f)
nexposeFrameWork = NexposeFrameWork(pynexposeHttps)
nexposeFrameWork.siteListing()
nexposeFrameWork.vulnerabilityListing()
pynexposeHttps.logout()
except Exception,e:
print "Error to connecting with NeXposeServer for listing vulnerabilities"
print e
pass
if option == "16":
f = open('logHTTPScan.txt', 'a')
sys.stdout = Log(sys.stdout, f)
httpScan.startHTTPScanBruteForce(hostname,ip,parsed_args.proxy)
if option == "17":
f = open('logCheckHeartbleed.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checkOpenSslVulnerable.startCheckVulnerability(ip,hostname)
if option == "18":
f = open('logCheckFTPvulnerable.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checkFtpVulnerable.startCheckVulnerability(ip,hostname)
if option == "19":
f = open('logCheckvuln_SQL_XSS_LFI.txt', 'a')
sys.stdout = Log(sys.stdout, f)
checkVuln_SQL_XSS_LFI.startCheckVulnerability(ip,hostname)
if option == "21":
f = open('logCheckOpenPortsScapy.txt', 'a')
sys.stdout = Log(sys.stdout, f)
#scannerScapy.scan_ports_multithread(hostname,parsed_args.ports)
if option == "22":
f = open('logCheckLibrariesWebsite.txt', 'a')
sys.stdout = Log(sys.stdout, f)
url = utils.verify_url(hostname)
print 'Obtaining libraries from website ' + url
print builtwith.parse(str(url))
if option == "23":
f = open('logIdentifyWebServer.txt', 'a')
sys.stdout = Log(sys.stdout, f)
url = utils.verify_url(hostname)
print 'Identify Server from ' + url
identifyServer = IdentifyServer()
identifyServer.test(url)
identifyServer.test(hostname)
identifyServer.test(ip)
if option == "24":
f = open('logCheckHeadersXSS.txt', 'a')
sys.stdout = Log(sys.stdout, f)
url = utils.verify_url(hostname)
print 'Checking headers from ' + url
headersXSS = CheckHeadersXSS()
headersXSS.test(url)
if option == "25":
f = open('logCheckCookies.txt', 'a')
sys.stdout = Log(sys.stdout, f)
url = utils.verify_url(hostname)
print 'Checking cookies from ' + url
checkCookies = CheckCookies()
checkCookies.test(url)
if option == "20":
try:
f = open('logCheckDomains.txt', 'a')
sys.stdout = Log(sys.stdout, f)
html = utils.HtmlExtractor(hostname)
#url always starts with http:// or https://
print "[+] Searching urls, emails and domains......"
url = html.get_url()
print url
data_extractor = utils.DataExtractor(html.get_body(), url, only_href = True)
logins = fuzzdb.Discovery.PredictableRes.Logins
httpMethods= fuzzdb.attack_payloads.http_protocol.http_protocol_methods
data_extractor.get_predictable_urls(url,logins)
data_extractor.get_http_methods(url,httpMethods)
urls = data_extractor.get_urls()
if len(urls)>0:
print "[+] URLS:"
for url in urls:
print url
extractMails.obtain_mails(url)
else:
print "[-] No URL found"
domains = data_extractor.get_domains(urls)
if len(domains)>0:
print "[+] Domains:"
for domain in domains:
print domain
extractMails.obtain_mails(domain)
else:
print "[-] No Domains found"
ips = data_extractor.get_ips_for_domains(domains)
for key in ips.iterkeys():
print key, ": ", " ".join(ips[key])
extractMails.obtain_mails(hostname)
except Exception,e:
pass
host=''
if hostname.startswith("www") == True:
parts = hostname.split(".")
for part in parts:
if part !='www':
host+=part+'.'
host = host[:-1]
else:
host = hostname
search = Search(host)
search.process()
emails = search.get_emails()
hosts = search.get_hostnames()
full = []
print "\n\n[+] Emails:"
print "------------------"
if emails == []:
print "No emails found"
else:
for email in emails:
print email
print "\n[+] Hosts:"
print "------------------------------------"
if | |
in_row[u'收件人邮编']
n_package = in_row.get(u'物品种类数量', None)
if not n_package:
n_package = in_row.get(u'包裹数量', None)
package_weight = in_row[u'包裹重量(公斤)']
length = in_row[u'长(厘米)']
width = in_row[u'宽(厘米)']
height = in_row[u'高(厘米)']
id_number = in_row[u'身份证号(EMS需要)']
for check_field in (sender_name, sender_phone, sender_address, receiver_name, receiver_mobile, receiver_address,
receiver_city, receiver_post_code, id_number):
if pd.isnull(check_field) or not isinstance(check_field, basestring) or not check_field.strip():
raise Exception, u"第%d行数据不完整,请更正" % n_row
if pd.isnull(n_package) or not isinstance(n_package, int) or n_package < 1:
raise Exception, u"第%d行 物品种类数量 或者 包裹数量 异常" % n_row
sender_name = "".join(sender_name.split())
sender_address = "".join(sender_address.split())
sender_phone = "".join(sender_phone.split())
receiver_name = "".join(receiver_name.split())
receiver_mobile = "".join(receiver_mobile.split())
receiver_address = "".join(receiver_address.split())
receiver_city = "".join(receiver_city.split())
receiver_post_code = "".join(receiver_post_code.split())
id_number = "".join(id_number.split())
package_type, order, receiver_province, receiver_municipal, receiver_address_header = \
fetch_ticket_number(n_row, receiver_city)
receiver_city = receiver_municipal
receiver_address = receiver_address_header + receiver_address
pc_text = receiver_province + receiver_municipal
receiver_province_city_font_size = "3" if len(
pc_text) <= 10 else "2.5" if len(pc_text) <= 15 else "2"
if not ticket_number_generator:
order.used = True
order.used_time = datetime.datetime.utcnow()
order.sender_address = ", ".join(
(sender_name, sender_address, sender_phone))
order.receiver_address = ", ".join(
(receiver_address, receiver_city, receiver_post_code))
order.receiver_mobile = receiver_mobile
order.receiver_id_number = id_number
order.receiver_name = receiver_name
if job:
order.job = job
job.version = "v3"
ticket_number = order.order_number
else:
ticket_number = ticket_number_generator.next()
full_address = "".join(filter(
lambda x: x.strip(), (receiver_province, receiver_city, receiver_address)))
p_data_list = []
item_names = []
total_price = 0
total_item_count = 0
total_net_weight = 0
total_gross_weight = 0
for i in xrange(n_package):
suffix = "" if i == 0 else ".%d" % i
item_name = in_row[u'申报物品%d(英文)' % (i + 1)]
item_count = in_row[u'数量%s' % suffix]
unit_price = in_row[u'物品单价(英镑)%s' % suffix]
if item_name is None or pd.isnull(item_name):
raise Exception, u"第%d行第%d个商品名称为空" % (n_row, i + 1)
item_name = str(item_name).strip()
sub_total_price, net_weight, gross_weight, unit_price, item_full_name, net_weight_per_item, tax_code, \
billing_unit, billing_unit_code, unit_per_item, specification, waybill_name \
= calculate_item_info_from_db_without_product_info(n_row, item_name, item_count)
item_names.append(u"%s\u2736%d" % (waybill_name, item_count))
total_price += sub_total_price
total_item_count += item_count
total_net_weight += net_weight
total_gross_weight += gross_weight
p_data_list.append([
ticket_number, sender_name, sender_address, sender_phone, receiver_name, receiver_mobile, receiver_city if receiver_city else receiver_province,
receiver_post_code, full_address, item_full_name, item_count, sub_total_price, gross_weight, item_full_name,
net_weight, unit_price, u"CNY", id_number,
City.denormalize_province(receiver_province),
City.denormalize_municipality(receiver_city if receiver_city else receiver_province)
])
# for p in p_data_list:
# p[10] = total_item_count
# p[11] = total_price
# p[12] = total_gross_weight
# p_data.append(p)
p_data = p_data_list
total_price = "%.2f" % total_price
if total_price.endswith(".00") and len(total_price) > 3:
total_price = total_price[:-3]
item_names = ", ".join(item_names)
generate_pdf(ticket_number, os.path.join(
barcode_dir, '%s.pdf' % ticket_number), locals(), tmpdir)
return ticket_number, pd.DataFrame(p_data, columns=[
u'快件单号', u'发件人', u'发件人地址', u'电话号码', u'收件人', u'电话号码.1', u'城市',
u'邮编', u'收件人地址', u'内件名称', u'数量', u'总价(元)', u'毛重(KG)', u'物品名称',
u'数量.1', u'单价', u'币别', u'备注', 'province', 'city'
])
def normalize_columns(in_df):
    """Remove ALL whitespace (leading, trailing, and internal) from every
    column name of *in_df*, mutating the frame in place.

    Example: " 收件人  邮编 " -> "收件人邮编".
    """
    in_df.columns = ["".join(name.strip().split()) for name in in_df.columns]
def xls_to_orders(input, output, tmpdir, percent_callback=None, job=None, test_mode=False):
    """Convert an uploaded order spreadsheet into shipping artifacts.

    Reads the Excel file ``input`` row by row, generates one waybill PDF per
    order (merged into a single 面单.pdf plus a waybills.json page index) and
    writes an airport customs declaration workbook (机场报关单.xlsx), all
    under the ``output`` directory.

    input            -- path or file-like object accepted by pandas.read_excel
    output           -- destination directory for the merged PDF/JSON/XLSX
    tmpdir           -- scratch directory forwarded to per-row PDF generation
    percent_callback -- optional callable receiving an int progress (0-100)
    job              -- optional job record; its ``version`` is tagged here
    test_mode        -- when True, ticket numbers come from a local
                        "TEST00000001"-style generator instead of the DB

    Raises Exception (Python 2 ``raise Exception, msg`` syntax throughout)
    when the batch exceeds MAX_ORDER_PER_BATCH, when a ticket number repeats
    within the batch, or when an expected per-row PDF is missing.
    """
    if percent_callback:
        percent_callback(0)
    # Force phone numbers, postcodes and ID numbers to str so Excel's numeric
    # coercion doesn't strip leading zeros; package counts are forced to int.
    # Both the line-broken and unbroken header spellings are covered.
    in_df = pd.read_excel(input, converters={
        u'发件人电话号码': lambda x: str(x),
        u'收件人邮编': lambda x: str(x),
        u'收件人手机号\n(11位数)': lambda x: str(x),
        u'身份证号\n(EMS需要)': lambda x: str(x),
        u'收件人手机号(11位数)': lambda x: str(x),
        u'身份证号(EMS需要)': lambda x: str(x),
        u'包裹数量': lambda x: int(x),
        u'物品种类数量': lambda x: int(x),
    })
    # Enforce the per-batch order cap when the Flask app configures one.
    if 'MAX_ORDER_PER_BATCH' in current_app.config \
            and len(in_df.index) > current_app.config['MAX_ORDER_PER_BATCH']:
        raise Exception, u"该批次个数(%d)超过最大订单数: %d" % \
            (len(in_df.index), current_app.config['MAX_ORDER_PER_BATCH'])
    # Strip whitespace from the headers so converter/column lookups match.
    normalize_columns(in_df)
    package_columns = [u"报关单号", u'总运单号', u'袋号', u'快件单号', u'发件人', u'发件人地址',
                       u'电话号码', u'收件人', u'电话号码.1', u'城市', u'邮编', u'收件人地址', u'内件名称',
                       u'数量', u'总价(元)', u'毛重(KG)', u'税号', u'物品名称', u'品牌', u'数量.1',
                       u'单位', u'单价', u'币别', u'备注', 'province', 'city']
    # Seed with an empty frame so the final concat keeps this column order
    # even if no rows are processed.
    package_df = pd.DataFrame([], columns=package_columns)
    package_data = [package_df]
    # Per-order PDFs are generated into output/barcode and deleted after merge.
    barcode_dir = os.path.join(output, "barcode")
    if not os.path.exists(barcode_dir):
        os.makedirs(barcode_dir)
    ticket_numbers = []
    ticket_number_set = set()
    test_ticket_number_generator = None
    if test_mode:
        # Deterministic local ticket numbers: TEST00000001, TEST00000002, ...
        def ticket_number_generator():
            start_number = 1
            while True:
                yield "TEST%s" % str(start_number).zfill(8)
                start_number += 1
        test_ticket_number_generator = ticket_number_generator()
        if job:
            job.version = "test_mode"
    for index, in_row in in_df.iterrows():
        ticket_number, p_data = process_row(
            index, in_row, barcode_dir, tmpdir, job, test_ticket_number_generator)
        # A repeated ticket number within one batch would merge two orders'
        # pages under one tracking number, so fail loudly.
        if ticket_number in ticket_number_set:
            raise Exception, u"同批次单号%s重复,请联系客服!" % ticket_number
        ticket_number_set.add(ticket_number)
        ticket_numbers.append(ticket_number)
        package_data.append(p_data)
        if percent_callback:
            percent_callback(int(index * 100.0 / len(in_df.index)))
    # Merge the per-order PDFs, recording each order's page span for later
    # single-waybill extraction.
    waybills = []
    total_page_number = 0
    merger = PdfFileMerger()
    for ticket_number in ticket_numbers:
        pdf_file = os.path.join(barcode_dir, "%s.pdf" % ticket_number)
        if not os.path.exists(pdf_file):
            raise Exception, "Failed to generate pdf: %s" % ticket_number
        # Python 2 built-in file(); reader keeps the handle open until merge.
        pdf_file_reader = PdfFileReader(file(pdf_file, 'rb'))
        page_number = pdf_file_reader.getNumPages()
        waybills.append({
            'tracking_no': ticket_number,
            'start_page': total_page_number,
            'end_page' : total_page_number + page_number,
        })
        total_page_number += page_number
        merger.append(pdf_file_reader)
    merger.write(os.path.join(output, u"面单.pdf".encode('utf8')))
    with open(os.path.join(output, "waybills.json"), 'w') as outfile:
        json.dump(waybills, outfile)
    # Per-order PDFs are no longer needed once merged.
    shutil.rmtree(barcode_dir)
    package_final_df = pd.concat(package_data, ignore_index=True)
    # Fixed declaration values applied to every row.
    package_final_df[u'税号'] = '01010700'
    package_final_df[u'单位'] = u'千克'
    # 1-based row numbers for the "NO" index column in the workbook.
    package_final_df.index += 1
    package_final_df.to_excel(os.path.join(output, u"机场报关单.xlsx".encode('utf8')),
                              columns=package_columns, index_label="NO")
    if percent_callback:
        percent_callback(100)
def read_order_numbers(inxlsx):
    """Extract the unique order numbers from an uploaded Excel file.

    The sheet must contain one of the recognised order-number columns
    (提取单号 / 分运单号 / 快件单号 / 物流运单编号); its values are read as
    strings to preserve leading zeros.

    Returns the column's unique values (numpy array).
    Raises Exception (Python 2 syntax) when no recognised column exists or
    the chosen column is empty.
    """
    columns = [u'提取单号', u'分运单号', u'快件单号', u'物流运单编号']
    df = pd.read_excel(inxlsx, converters={
        key: lambda x: str(x) for key in columns
    })
    column = None
    # NOTE(review): no break here, so when several recognised columns are
    # present the LAST one in ``columns`` wins — confirm this is intended.
    for key in columns:
        if key in df:
            column = key
    if not column:
        raise Exception, u"输入Excel格式错误"
    order_numbers = df[column].unique()
    if len(order_numbers) <= 0:
        raise Exception, u"输入[%s]列为空" % column
    return order_numbers
def generate_customs_df(route_config, version, package_df):
    """Build the per-item customs declaration DataFrame from package data.

    route_config -- route dict; only ['name'] is read (currently unused below)
    version      -- must be "v3"; anything else raises
    package_df   -- per-package frame as produced by xls_to_orders/process_row

    Returns (customs_df, ticket_info): ``customs_df`` has one row per declared
    item in the Chinese customs column layout; ``ticket_info`` describes how
    to group rows into printable tickets (grouped by 分运单号).

    Side effect: a temporary "Sequence" column is added to ``package_df`` and
    removed again before returning.

    Python 2 code: uses the ``<>`` inequality operator and
    ``raise Exception, msg`` syntax.
    """
    route_name = route_config['name']
    if version <> "v3":
        raise Exception, "Version not supported for generate_customs_df: %s" % version
    # Remember the original row order so we can restore it after the merge
    # (pd.merge does not preserve row order).
    package_df["Sequence"] = range(1, len(package_df.index) + 1)
    customs_columns = [u'分运单号', u'申报类型', u'物品名称', u'英文物品名称', u'商品编码', u'净重(KG)', u'毛重(KG)',
                       u'规格/型号', u'产销城市', u'币制', u'申报数量', u'申报总价', u'申报计量单位', u'收件人', u'收件人城市',
                       u'收件人地址', u'收件人电话', u'发件人国家', u'发件人', u'英文发件人', u'发件人城市', u'英文发件人城市',
                       u'英文经停城市', u'发件人地址', u'英文发件人地址', u'发件人电话', u'收发件人证件类型', u'收发件人证件号',
                       u'包装种类', u'是否含木质包装', u'是否为旧物品', u'是否为低温运输', u'生产国别', u'贸易国别']
    customs_df = pd.DataFrame([], columns=customs_columns)
    # Copy the package columns into their customs-layout counterparts.
    for column, p_column in ((u'分运单号', u'快件单号'),
                             (u'物品名称', u'内件名称'),
                             (u'数量', u'数量'),
                             (u'毛重(KG)', u'毛重(KG)'),
                             (u'收件人', u'收件人'),
                             (u'收发件人证件号', u'备注'),
                             (u'收件人城市', u'city'),
                             (u'收件人地址', u'收件人地址'),
                             (u'收件人电话', u'电话号码.1'),
                             (u'发件人', u'发件人'),
                             (u'英文发件人', u'发件人'),
                             (u'发件人地址', u'发件人地址'),
                             (u'英文发件人地址', u'发件人地址'),
                             (u'发件人电话', u'电话号码'),
                             ('Sequence', 'Sequence')):
        customs_df[column] = package_df[p_column]
    #fill in bc product info
    # Pull registered product details for every distinct item name in one
    # query, then inner-merge them onto the customs rows by item name.
    product_info_df = pd.read_sql_query(ProductInfo.query.filter(ProductInfo.full_name.in_(
        tuple(set(customs_df[u'物品名称'].map(lambda x: str(x)).tolist())))).statement, db.session.bind)
    # Remember the DB columns so the helper fields can be dropped afterwards.
    columns_to_delete = product_info_df.columns
    product_info_df.rename(columns={'full_name': u'物品名称'}, inplace=True)
    # NOTE(review): inner merge silently drops rows whose item name has no
    # ProductInfo record — confirm that is the intended behavior.
    customs_df = pd.merge(customs_df, product_info_df, on=u'物品名称')
    product_info_columns = [(u"申报单价", "unit_price"),
                            (u"商品编码", "tax_code"),
                            (u"规格/型号", "specification"),
                            (u"申报计量单位", "billing_unit_code")]
    # check if any is empty
    # Every required registration field must be present for every item;
    # otherwise report exactly which products are incomplete.
    for column, _column in product_info_columns \
            + [(u"单个物品申报数量", "unit_per_item"),
               (u"小票名称", "ticket_name"),
               (u"小票价格", "ticket_price")]:
        null_valued = pd.isnull(customs_df[_column])
        if null_valued.any():
            product_name_null_valued = customs_df[null_valued][u'物品名称'].drop_duplicates() \
                .map(lambda x: str(x)).tolist()
            raise Exception, u"如下商品的注册信息未包含必须字段[%s]: %s" % \
                (column, ", ".join(product_name_null_valued))
    # Grouping recipe consumed by the ticket-printing code: rows grouped by
    # waybill number with the item/count/price column names spelled out.
    ticket_info = {
        'groups': customs_df[[u'分运单号', "ticket_name", u'数量', "ticket_price"]].groupby(u'分运单号'),
        'item_column': 'ticket_name',
        'count_column': u'数量',
        'price_column': 'ticket_price',
    }
    for column, p_column in product_info_columns:
        customs_df[column] = customs_df[p_column]
    def customs_column_filter(row):
        # Derive per-row declaration fields from the merged product info.
        row[u"物品名称"] = row[u"物品名称"] if pd.isnull(row["report_name"]) else row["report_name"]
        row[u"英文物品名称"] = row["ticket_name"]
        row[u"净重(KG)"] = row[u"数量"] * row["net_weight"]
        row[u'申报数量'] = row[u'数量'] * row["unit_per_item"]
        row[u'申报总价'] = row[u'申报数量'] * row[u"申报单价"]
        # Transliterate sender fields to ASCII for the English columns.
        row[u'英文发件人'] = unidecode(row[u'发件人'])
        row[u'英文发件人地址'] = unidecode(row[u'发件人地址'])
        return row
    customs_df = customs_df.apply(customs_column_filter, axis=1)
    # Drop the raw DB helper columns now that everything is derived.
    for column in columns_to_delete:
        if column in customs_df:
            del customs_df[column]
    customs_df.sort_values(by=["Sequence"], inplace=True)
    #fixed items
    # Constant declaration values shared by every row on this route.
    customs_df[u"申报类型"] = "B"
    customs_df[u"产销城市"] = u"曼彻斯特"
    customs_df[u"币制"] = "142"
    customs_df[u"发件人国家"] = "303"
    customs_df[u"发件人城市"] = "曼彻斯特"
    customs_df[u"英文发件人城市"] = "Manchester"
    customs_df[u"收发件人证件类型"] = "1"
    customs_df[u"包装种类"] = "2"
    customs_df[u"是否含木质包装"] = "0"
    customs_df[u"是否为旧物品"] = "0"
    customs_df[u"是否为低温运输"] = "0"
    customs_df[u"生产国别"] = "303"
    customs_df[u"贸易国别"] = "303"
    #sort
    # NOTE(review): this sort repeats the one above — harmless but redundant.
    customs_df.sort_values(by=["Sequence"], inplace=True)
    del customs_df["Sequence"]
    del package_df["Sequence"]
    del customs_df[u"申报单价"]
    del customs_df[u"数量"]
    return customs_df, ticket_info
def generate_summary_wb(customs_df):
    """Build the customs summary workbook: one row per tax code (商品编码)
    plus a final totals row, appended below the prebuilt header sheet in
    summary_header.xlsx.

    Returns the openpyxl workbook; the caller is responsible for saving it.
    """
    header_columns = (u'序号', u'商品编号', u'物品名称', u'件数(纸箱)', u'重量', u'数量', u'单位', u'币制', u'价值', u'备注')

    def _tax_code_summary(bucket):
        # Each group carries exactly one tax code by construction.
        codes = bucket[u'商品编码'].unique()
        assert(len(codes) == 1)
        code = codes[0]
        item_name, item_unit = TAX_CODE_MAP.get(code, ('', ''))
        return pd.Series({
            u'序号': '',
            u'商品编号': code,
            u'物品名称': item_name,
            u'件数(纸箱)': len(bucket[u'分运单号'].unique()),
            u'重量': bucket[u'毛重(KG)'].sum(),
            u'数量': bucket[u'数量'].sum(),
            u'单位': item_unit,
            u'币制': 'RMB',
            u'价值': '',
            u'备注': '',
        })

    summary_df = customs_df.groupby(u'商品编码').apply(_tax_code_summary)
    summary_df = summary_df.reindex(columns=header_columns)
    # 1-based running numbers for the 序号 column.
    summary_df[u'序号'] = range(1, len(summary_df.index)+1)
    # Totals row: numeric columns summed, label cells filled in below.
    summary_df = summary_df.append(summary_df.sum(numeric_only=True), ignore_index=True)
    for label_column, label_text in ((u'序号', ''), (u'商品编号', u'合计')):
        summary_df.iloc[-1, summary_df.columns.get_loc(label_column)] = label_text
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'summary_header.xlsx')
    wb = load_workbook(template_path)
    ws = wb[u'汇总清单']
    appended_rows = 0
    for excel_row in dataframe_to_rows(summary_df, index=False, header=False):
        appended_rows += 1
        ws.append(excel_row)
    return wb
def map_full_name_to_report_name(data_df, column_name):
    """Replace product full names in ``data_df[column_name]`` with their
    registered report names from the ProductInfo table.

    Returns a NEW merged DataFrame (inner merge on the name column, so rows
    without a ProductInfo record are dropped); all DB helper columns are
    removed before returning.  Raises Exception (Python 2 syntax) when the
    column is missing.
    """
    if not column_name in data_df.columns:
        raise Exception, "%s not in header" % column_name
    # One query for all distinct names in the column.
    product_info_df = pd.read_sql_query(ProductInfo.query.filter(ProductInfo.full_name.in_(
        tuple(set(data_df[column_name].map(lambda x: str(x)).tolist())))).statement, db.session.bind)
    columns_to_delete = product_info_df.columns
    product_info_df.rename(columns={'full_name': column_name}, inplace=True)
    data_df = pd.merge(data_df, product_info_df, on=column_name)
    # NOTE(review): a NaN report_name is truthy in Python (float('nan')), so a
    # missing report_name would be written through as NaN instead of falling
    # back to the original name — generate_customs_df uses pd.isnull for the
    # same decision. Confirm whether report_name can be NULL in the DB.
    data_df[column_name] = data_df.apply(lambda row:row['report_name'] if row['report_name'] else row[column_name],
                                         axis=1)
    # Drop every raw DB column that the merge brought along.
    for column in columns_to_delete:
        if column in data_df:
            del data_df[column]
    return data_df
def remap_customs_df(customs_final_df):
wb = load_workbook(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cc_header.xlsx'))
ws = wb[u"申报单格式"]
row_count = 0
for r in dataframe_to_rows(customs_final_df, index=False, header=False):
row_count += 1
ws.append(r)
# merge cell for this one
# base_index = 7
# last_value = 0
# last_row_num = None
# columns = (1, 2, 4, 15, 16, 17, 18, 19, 20, 22, 23, 24, 26)
#for row_num in range(base_index, base_index + row_count):
# rd = ws.row_dimensions[row_num]
# rd.height = 50
# is_last_row = (row_num == base_index + row_count - 1)
#
# package_index = int(ws.cell(row=row_num, column=1).value)
# assert (package_index > 0)
# if last_value <= 0:
# last_value = package_index
# last_row_num = row_num
| |
os.environ.get(environment_variable, None)
for environment_variable in self.environment_variables
}
yield environment
class SimpleTransforms(MetalNode):
    """Applies a configured list of keypath-based transformations to each
    incoming message.

    Each entry of ``transform_mapping`` is a dict with a ``path`` (the keypath
    to rewrite) and optionally ``target_value``, ``target_function`` (a
    function name whose ``__``-separated leading components name a module to
    import), ``function_args``, ``function_kwargs`` and ``starting_path``.
    Functions are resolved once at construction time and cached in
    ``self.functions_dict``.
    """

    def __init__(
        self,
        missing_keypath_action="ignore",
        starting_path=None,
        transform_mapping=None,
        target_value=None,
        keypath=None,
        **kwargs
    ):
        self.missing_keypath_action = missing_keypath_action
        self.transform_mapping = transform_mapping or []
        self.functions_dict = {}  # full function name -> resolved callable
        self.starting_path = starting_path
        for transform in self.transform_mapping:
            # Not doing the transforms; only loading the right functions here
            function_name = transform.get("target_function", None)
            full_function_name = function_name
            if function_name is not None:
                components = function_name.split("__")
                if len(components) == 1:
                    # Bare name: resolve against this module's globals.
                    # BUG FIX: the original bound the lookup to ``function_obj``
                    # but stored ``function`` below, which raised NameError on
                    # the first bare name (or silently reused the previous
                    # iteration's function).
                    function = globals()[components[0]]
                else:
                    # "pkg__mod__fn" -> import "pkg.mod", fetch attribute "fn".
                    module = importlib.import_module(".".join(components[:-1]))
                    function = getattr(module, components[-1])
                self.functions_dict[full_function_name] = function
        super(SimpleTransforms, self).__init__(**kwargs)

    def process_item(self):
        """Apply every configured transform to ``self.message`` in place via
        ``replace_by_path``, then yield the (mutated) message."""
        logging.debug("TRANSFORM " + str(self.name))
        logging.debug(self.name + " " + str(self.message))
        for transform in self.transform_mapping:
            path = transform["path"]
            target_value = transform.get("target_value", None)
            function_name = transform.get("target_function", None)
            starting_path = transform.get("starting_path", None)
            # Look up the pre-resolved callable (None means plain value set).
            if function_name is not None:
                function = self.functions_dict[function_name]
            else:
                function = None
            function_kwargs = transform.get("function_kwargs", None)
            function_args = transform.get("function_args", None)
            logging.debug(self.name + " calling replace_by_path:")
            replace_by_path(
                self.message,
                tuple(path),
                target_value=target_value,
                function=function,
                function_args=function_args,
                starting_path=starting_path,
                function_kwargs=function_kwargs,
            )
        logging.debug("after SimpleTransform: " + self.name + str(self.message))
        yield self.message
class Serializer(MetalNode):
    """
    Takes an iterable thing as input, and successively yields its items.

    With ``values=True`` the input is treated as a mapping and its values
    are emitted; otherwise the input is iterated directly. A ``None``
    message is passed through as a single ``None``.
    """

    def __init__(self, values=False, *args, **kwargs):
        self.values = values
        super(Serializer, self).__init__(**kwargs)

    def process_item(self):
        payload = self.__message__
        if payload is None:
            yield None
        elif self.values:
            # Mapping input: emit each value.
            for element in payload.values():
                yield element
        else:
            # Plain iterable input: emit each item, logging it first.
            for element in payload:
                logging.debug(self.name + " " + str(element))
                yield element
class AggregateValues(MetalNode):
    """
    Collects the values found under ``tail_path`` across the incoming
    message (delegating entirely to the ``aggregate_values`` helper) and
    yields the aggregate.
    """

    def __init__(self, values=False, tail_path=None, **kwargs):
        self.tail_path = tail_path
        self.values = values
        super(AggregateValues, self).__init__(**kwargs)

    def process_item(self):
        aggregated = aggregate_values(
            self.__message__, self.tail_path, values=self.values)
        logging.debug("aggregate_values " + self.name + " " + str(aggregated))
        yield aggregated
class Filter(MetalNode):
    """
    Applies tests to each message and filters out messages that don't pass
    Built-in tests:
        key_exists
        value_is_true
        value_is_not_none
    Example:
        {'test': 'key_exists',
         'key': mykey}
    """

    def __init__(self, test=None, test_keypath=None, value=True, *args, **kwargs):
        # test -- name of one of the built-in tests listed in the docstring
        # test_keypath -- key (or keypath) handed to the test function
        # value -- expected test result; the message passes when they match
        self.test = test
        self.value = value
        self.test_keypath = test_keypath or []
        super(Filter, self).__init__(*args, **kwargs)

    @staticmethod
    def _key_exists(message, key):
        return key in message

    @staticmethod
    def _value_is_not_none(message, key):
        logging.debug(
            "value_is_not_none: {message} {key}".format(message=str(message), key=key)
        )
        return get_value(message, key) is not None

    @staticmethod
    def _value_is_true(message, key):
        # NOTE(review): unlike _value_is_not_none this uses message.get, so it
        # supports only a flat key, not a nested keypath — confirm intended.
        return to_bool(message.get(key, False))

    def process_item(self):
        """Yield the message when the configured test matches ``self.value``,
        otherwise yield a NothingToSeeHere marker."""
        if self.test in ["key_exists", "value_is_not_none", "value_is_true"]:
            result = (
                getattr(self, "_" + self.test)(self.__message__, self.test_keypath)
                == self.value
            )
        else:
            # BUG FIX: the original referenced the undefined name ``test``
            # here, raising NameError instead of the intended error message.
            raise Exception("Unknown test: {test_name}".format(test_name=self.test))
        if result:
            logging.debug("Sending message through")
            yield self.message
        else:
            logging.debug("Blocking message: " + str(self.__message__))
            yield NothingToSeeHere()
class StreamMySQLTable(MetalNode):
    """Streams every row of a MySQL table as a dict, optionally bracketed
    by BatchStart/BatchEnd markers.

    Connection parameters are stored at construction; the connection itself
    is opened in ``setup()``.
    """

    def __init__(
        self,
        *args,
        host="localhost",
        user=None,
        table=None,
        # BUG FIX: the source contained a ``<PASSWORD>`` redaction artifact
        # here, which is a syntax error; a None default restores validity.
        password=None,
        database=None,
        port=3306,
        to_row_obj=False,
        send_batch_markers=False,
        **kwargs
    ):
        self.host = host
        self.user = user
        self.to_row_obj = to_row_obj
        self.password = password
        self.database = database
        self.port = port
        self.table = table
        self.send_batch_markers = send_batch_markers
        super(StreamMySQLTable, self).__init__(**kwargs)

    def setup(self):
        """Open the database connection and prepare the schema query."""
        # BUG FIX: the original never passed ``host`` to connect(), so
        # self.host was silently ignored and the driver default was used.
        self.db = MySQLdb.connect(
            host=self.host, passwd=self.password, db=self.database,
            user=self.user, port=self.port,
        )
        self.cursor = MySQLdb.cursors.DictCursor(self.db)
        self.table_schema_query = (
            """SELECT column_name, column_type """
            """FROM information_schema.columns """
            """WHERE table_name='{table}';""".format(table=self.table)
        )
        print(self.table_schema_query)  # NOTE(review): debug print — consider logging.debug
        # self.table_schema = self.get_schema()
        # Need a mapping from header to MYSQL TYPE
        # for mapping in self.table_schema:
        #     column = mapping["column_name"]
        #     type_string = mapping["column_type"]
        #     this_type = ds.MySQLTypeSystem.type_mapping(type_string)
        #     Unfinished experimental code
        # def get_schema(self):
        #     self.cursor.execute(self.table_schema_query)
        #     table_schema = self.cursor.fetchall()
        #     return table_schema

    def generator(self):
        """Yield each table row as a dict (DictCursor), one at a time."""
        # NOTE(review): self.table_schema is never assigned anywhere (the
        # get_schema path is commented out), so send_batch_markers=True will
        # raise AttributeError here — confirm before enabling markers.
        if self.send_batch_markers:
            yield BatchStart(schema=self.table_schema)
        self.cursor.execute("""SELECT * FROM {table};""".format(table=self.table))
        result = self.cursor.fetchone()
        while result is not None:
            yield result
            result = self.cursor.fetchone()
        if self.send_batch_markers:
            yield BatchEnd()
class PrinterOfThings(MetalNode):
    """Prints each message to stdout (optionally pretty-printed, preceded by
    a configurable prefix) and passes the message through unchanged."""

    @set_kwarg_attributes()
    def __init__(self, disable=False, pretty=False, prepend="printer: ", **kwargs):
        self.disable = disable
        self.pretty = pretty
        super(PrinterOfThings, self).__init__(**kwargs)
        logging.debug("Initialized printer...")

    def process_item(self):
        if self.disable:
            # Printing suppressed; just forward the message.
            yield self.message
            return
        print(self.prepend)
        if self.pretty:
            pprint.pprint(self.__message__, indent=2)
        else:
            print(str(self.__message__))
        print("\n")
        print("------------")
        yield self.message
class ConstantEmitter(MetalNode):
    """
    Send a thing every n seconds

    Emits ``thing`` after every ``delay``-second sleep, ``max_loops`` times.
    """

    def __init__(self, thing=None, max_loops=5, delay=0.5, **kwargs):
        # thing: payload to emit; delay: seconds between emissions;
        # max_loops: number of emissions before the generator finishes.
        self.thing = thing
        self.delay = delay
        self.max_loops = max_loops
        super(ConstantEmitter, self).__init__(**kwargs)

    def generator(self):
        """Yield ``self.thing`` at a fixed interval until the loop budget runs out."""
        # BUG FIX: removed dead code ``if random.random() < -0.1: assert False``
        # -- random.random() is in [0, 1), so the branch could never fire, and
        # ``assert`` used as control flow vanishes under ``python -O``.
        for _ in range(self.max_loops):
            time.sleep(self.delay)
            yield self.thing
class TimeWindowAccumulator(MetalNode):
    """
    Every N seconds, put the latest M seconds data on the queue.

    NOTE(review): unimplemented stub -- ``time_window`` and ``send_interval``
    are attached by the decorator but no accumulation logic exists yet.
    """

    @set_kwarg_attributes()
    def __init__(self, time_window=None, send_interval=None, **kwargs):
        # Attributes are attached by @set_kwarg_attributes; no body yet.
        pass
class LocalFileReader(MetalNode):
    """Read a file from ``directory`` and emit its contents.

    The filename comes either from the ``filename`` kwarg or, when that is
    ``None``, from the incoming message. With ``serialize`` set the file is
    emitted line by line; otherwise as one blob.
    """

    @set_kwarg_attributes()
    def __init__(
        self,
        directory=".",
        send_batch_markers=True,
        serialize=False,
        read_mode="r",
        filename=None,
        **kwargs
    ):
        # All keyword arguments are attached as attributes by the decorator.
        super(LocalFileReader, self).__init__(**kwargs)

    def process_item(self):
        # Fall back to the message payload when no fixed filename was given.
        filename = "/".join([self.directory, self.filename or self.__message__])
        with open(filename, self.read_mode) as file_obj:
            if self.serialize:
                # One message per line.
                for line in file_obj:
                    output = line
                    yield output
            else:
                # Whole file as a single message.
                output = file_obj.read()
                yield output
class CSVReader(MetalNode):
    """Parse an incoming CSV document and emit one ``dict`` per row."""

    @set_kwarg_attributes()
    def __init__(self, **kwargs):
        super(CSVReader, self).__init__(**kwargs)

    def process_item(self):
        # The whole CSV document arrives as a single string message.
        yield from csv.DictReader(io.StringIO(self.__message__))
class LocalDirectoryWatchdog(MetalNode):
    """Poll ``directory`` and emit the path of every newly modified file.

    Every ``check_interval`` seconds the directory is scanned; any file
    whose mtime is newer than the newest mtime seen in a previous scan is
    emitted as a "/"-joined path.
    """

    def __init__(self, directory=".", check_interval=3, **kwargs):
        self.directory = directory
        # High-water mark: newest modification time already reported.
        self.latest_arrival = time.time()
        self.check_interval = check_interval
        super(LocalDirectoryWatchdog, self).__init__(**kwargs)

    def generator(self):
        # ``keep_alive`` is managed by the MetalNode machinery.
        while self.keep_alive:
            logging.debug("sleeping...")
            time.sleep(self.check_interval)
            time_in_interval = None
            for filename in os.listdir(self.directory):
                last_modified_time = os.path.getmtime(
                    "/".join([self.directory, filename])
                )
                if last_modified_time > self.latest_arrival:
                    yield "/".join([self.directory, filename])
                    # Track the newest mtime emitted during this scan.
                    if (
                        time_in_interval is None
                        or last_modified_time > time_in_interval
                    ):
                        time_in_interval = last_modified_time
                    logging.debug("time_in_interval: " + str(time_in_interval))
            # Advance the high-water mark only after a complete scan.
            if time_in_interval is not None:
                self.latest_arrival = time_in_interval
class StreamingJoin(MetalNode):
    """
    Joins two streams on a key, using exact match only. MVP.

    NOTE(review): work in progress -- matching and merging are not
    implemented; ``process_item`` currently emits a placeholder.
    """

    def __init__(self, window=30, streams=None, *args, **kwargs):
        # window: seconds a message stays buffered while awaiting a match.
        # streams: maps stream name -> path of the join key in its messages.
        self.window = window
        self.streams = streams
        self.stream_paths = streams
        # One expiring buffer per incoming stream.
        self.buffers = {
            stream_name: TimedDict(timeout=self.window)
            for stream_name in self.stream_paths.keys()
        }
        super(StreamingJoin, self).__init__(*args, **kwargs)

    def process_item(self):
        """
        Extract the join key for the incoming message. (Unfinished.)
        """
        value_to_match = get_value(
            self.message, self.stream_paths[self.message_source.name]
        )
        # Check for matches in all other streams.
        # If complete set of matches, yield the merged result
        # If not, add it to the `TimedDict`.
        yield ("hi")  # placeholder output until the join is implemented
class DynamicClassMediator(MetalNode):
    """Wrap a sub-graph of nodes so it behaves as a single node.

    Subclasses built by ``class_factory`` provide ``node_dict`` and
    ``raw_config``. On construction every inner node is instantiated, the
    configured edges are wired, and the single source's inputs / single
    sink's outputs are re-exported as this node's own.
    """

    def __init__(self, *args, **kwargs):
        super(DynamicClassMediator, self).__init__(**kwargs)
        # Instantiate each inner node class with the mediator's kwargs.
        for node_name, node_dict in self.node_dict.items():
            cls_obj = node_dict["cls_obj"]
            node_obj = cls_obj(**kwargs)
            node_dict["obj"] = node_obj
        # Wire the edges; `>` is the MetalNode connection operator.
        for edge in self.raw_config["edges"]:
            source_node_obj = self.node_dict[edge["from"]]["obj"]
            target_node_obj = self.node_dict[edge["to"]]["obj"]
            source_node_obj > target_node_obj

        def bind_methods():
            # Re-bind all public functions of DynamicClassMediator onto this
            # instance. NOTE(review): binds the base class's functions, not
            # the dynamic subclass's -- confirm this is intentional.
            for attr_name in dir(DynamicClassMediator):
                if attr_name.startswith("_"):
                    continue
                attr_obj = getattr(DynamicClassMediator, attr_name)
                if not isinstance(attr_obj, types.FunctionType):
                    continue
                setattr(self, attr_name, types.MethodType(attr_obj, self))

        bind_methods()
        # Expose the inner source's inputs and sink's outputs as our own.
        source = self.get_source()
        self.input_queue_list = source.input_queue_list
        sink = self.get_sink()
        self.output_queue_list = sink.output_queue_list
        self.output_node_list = sink.output_node_list
        self.input_node_list = source.input_node_list

    def get_sink(self):
        """Return the unique sink node of the inner graph, or None."""
        sinks = self.sink_list()
        if len(sinks) > 1:
            raise Exception("`DynamicClassMediator` may have no more than one sink.")
        elif len(sinks) == 0:
            return None
        return sinks[0]

    def get_source(self):
        """Return the unique source node of the inner graph, or None."""
        sources = self.source_list()
        if len(sources) > 1:
            raise Exception("`DynamicClassMediator` may have no more than one source.")
        elif len(sources) == 0:
            return None
        return sources[0]

    def sink_list(self):
        """Return inner nodes that have no outgoing queues."""
        sink_nodes = []
        for node_name, node_dict in self.node_dict.items():
            node_obj = node_dict["obj"]
            if len(node_obj.output_queue_list) == 0:
                sink_nodes.append(node_obj)
        return sink_nodes

    def source_list(self):
        """Return inner nodes that report themselves as sources."""
        source_nodes = [
            node_dict["obj"]
            for node_dict in self.node_dict.values()
            if node_dict["obj"].is_source
        ]
        return source_nodes

    def hi(self):
        # Trivial liveness/debug helper.
        return "hi"
def get_node_dict(node_config):
    """Build a ``{node_name: spec}`` mapping from a raw graph config.

    For every entry under ``node_config["nodes"]`` the referenced class is
    looked up in this module's namespace and recorded together with its
    frozen arguments and keyword-argument remapping.
    """
    node_dict = {}
    # BUG FIX: the loop variable used to shadow the ``node_config``
    # parameter; renamed for clarity.
    for node_spec in node_config["nodes"]:
        node_class = globals()[node_spec["class"]]
        node_name = node_spec["name"]
        node_dict[node_name] = {
            "class": node_class,
            "frozen_arguments": node_spec.get("frozen_arguments", {}),
            "remapping": node_spec.get("arg_mapping", {}),
        }
        # BUG FIX: the original instantiated ``node_class`` here and
        # discarded the instance; instantiation belongs to
        # DynamicClassMediator.__init__, which builds the "obj" entries.
    return node_dict
def kwarg_remapper(f, **kwarg_mapping):
    """Return a wrapper around ``f`` that renames keyword arguments.

    ``kwarg_mapping`` maps f's own parameter names to their externally
    visible names; any parameter not mentioned keeps its name. Keyword
    arguments matching neither are silently dropped.
    """
    logging.debug("kwarg_mapping:" + str(kwarg_mapping))
    external_to_internal = {ext: internal for internal, ext in kwarg_mapping.items()}
    for param_name in inspect.signature(f).parameters:
        if param_name not in kwarg_mapping:
            external_to_internal[param_name] = param_name

    def remapped_function(*args, **kwargs):
        translated = {
            external_to_internal[name]: value
            for name, value in kwargs.items()
            if name in external_to_internal
        }
        logging.debug("renamed function with kwargs: " + str(translated))
        return f(*args, **translated)

    return remapped_function
def template_class(
    class_name, parent_class, kwargs_remapping, frozen_arguments_mapping
):
    """Create a subclass of ``parent_class`` whose ``__init__`` has some
    arguments frozen (``frozen_arguments_mapping``) and others renamed
    (``kwargs_remapping``).

    ``parent_class`` may be a class object or the name of a class defined
    in this module.
    """
    kwargs_remapping = kwargs_remapping or {}
    # BUG FIX: resolve a class given by name BEFORE capturing its __init__.
    # The original built ``functools.partial(parent_class.__init__, ...)``
    # first, which bound ``str.__init__`` whenever a class *name* was passed.
    if isinstance(parent_class, (str,)):
        parent_class = globals()[parent_class]
    frozen_init = functools.partial(parent_class.__init__, **frozen_arguments_mapping)
    cls = type(class_name, (parent_class,), {})
    setattr(cls, "__init__", kwarg_remapper(frozen_init, **kwargs_remapping))
    return cls
def class_factory(raw_config):
    """Build a new ``DynamicClassMediator`` subclass from a raw config dict
    and inject it into this module's namespace under its configured name.
    """
    name = raw_config["name"]
    new_class = type(name, (DynamicClassMediator,), {})
    new_class.node_dict = get_node_dict(raw_config)
    new_class.class_name = name
    new_class.edge_list_dict = raw_config.get("edges", [])
    new_class.raw_config = raw_config
    # Wrap every inner node class with its frozen/remapped-argument shim.
    for node_name, node_config in new_class.node_dict.items():
        wrapped_cls = template_class(
            node_name,
            node_config["class"],
            node_config["remapping"],
            node_config["frozen_arguments"],
        )
        setattr(wrapped_cls, "raw_config", raw_config)
        node_config["cls_obj"] = wrapped_cls
    # Inject the generated class so get_node_dict can resolve it by name.
    globals()[new_class.__name__] = new_class
    return new_class
class Remapper(MetalNode):
    """Rename the keys of each incoming dictionary according to ``mapping``."""

    def __init__(self, mapping=None, **kwargs):
        self.remapping_dict = mapping or {}
        super(Remapper, self).__init__(**kwargs)

    def process_item(self):
        logging.debug("Remapper {node}:".format(node=self.name) + str(self.__message__))
        yield remap_dictionary(self.__message__, self.remapping_dict)
class BlackHole(MetalNode):
    """
    Accepts any incoming message and promptly ignores it. Returns ``NothingToSeeHere``.
    """

    def __init__(self, **kwargs):
        super(BlackHole, self).__init__(**kwargs)

    def process_item(self):
        message_text = str(self.__message__)
        logging.debug(
            "BlackHole {node}:".format(node=self.name) + message_text
        )
        yield NothingToSeeHere()
class Blocker(BlackHole):
"""
Class that ignores all messages, but sends a message when all of its upstream
nodes have finished.
"""
def __init__(self, **kwargs):
kwargs.update({"send_termination_message": | |
ints
for cluster, nodes in clusters.items():
clusters[cluster] = sorted([int(node) for node in nodes])
remove_outliers(clusters)
clustered = []
for cluster, nodes in clusters.items():
for n in nodes:
clustered.append(n)
clusters['singles'] = [] # Add singles to clusters if not in top n scores
clusters['removed'] = []
for node in projection_2D:
if node not in clustered and node not in drop:
clusters['singles'].append(node)
elif node in drop:
clusters['removed'].append(node)
G = nx.Graph()
for pair in flat:
G.add_edge(int(pair[0]), int(pair[1]), weight=pair[2])
#if you want to see directionality in the networkx plot
#G = nx.MultiDiGraph(G)
#adds singles if not in top n scores
for node_key in projection_2D:
if node_key not in G.nodes:
G.add_node(node_key)
return flat, clusters, G
def plot_slicem_network(self, network_from, frame):
    """Draw the cluster network (spring layout) into the tk ``frame``.

    Relies on module-level state: ``G`` (graph), ``colors`` (per-node
    colors), ``drop`` (class averages excluded from analysis).
    """
    #TODO: adjust k, scale for clearer visualization
    G_subset = G.copy()
    color_dict = {i: color for i, color in enumerate(colors)}
    node_dict = {node: i for i, node in enumerate(G.nodes)}
    # Remove dropped class averages and their colors.
    for d in drop:
        G_subset.remove_node(d)
        color_dict.pop(node_dict[d])
    color_subset = [color for k, color in color_dict.items()]
    # Layout constants differ per scoring mode (knn vs top_n).
    if network_from == 'knn':
        positions = nx.spring_layout(G_subset, weight='weight', k=0.3, scale=3.5)
    else:
        positions = nx.spring_layout(G_subset, weight='weight', k=0.18, scale=1.5)
    f = Figure(figsize=(8,5))
    a = f.add_subplot(111)
    a.axis('off')
    nx.draw_networkx_nodes(G_subset, positions, ax=a, edgecolors='black', linewidths=2,
                           node_size=300, alpha=0.65, node_color=color_subset)
    nx.draw_networkx_edges(G_subset, positions, ax=a, width=1, edge_color='grey')
    nx.draw_networkx_labels(G_subset, positions, ax=a, font_weight='bold', font_size=10)
    # Replace any previously embedded canvas/toolbar before re-drawing.
    if self.netcanvas:
        self.netcanvas.get_tk_widget().destroy()
        self.nettoolbar.destroy()
    self.netcanvas = FigureCanvasTkAgg(f, frame)
    self.netcanvas.draw()
    self.netcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    self.netcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    self.nettoolbar = NavigationToolbar2Tk(self.netcanvas, frame)
    self.nettoolbar.update()
def plot_tiles(self):
    """plot 2D class avgs sorted and colored by cluster"""
    #TODO: adjust plot, border and text_box sizes
    # Flatten clusters into parallel lists: images, labels, border colors.
    ordered_projections = []
    flat_clusters = []
    colors_2D = []
    for cluster, nodes in clusters.items():
        for n in nodes:
            ordered_projections.append(projection_2D[n])
        for n in nodes:
            flat_clusters.append(n)
        for i, n in enumerate(G.nodes):
            if n in nodes:
                colors_2D.append(colors[i])
    # Near-square grid big enough for all tiles.
    grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))
    if len(ordered_projections) <= (grid_cols**2 - grid_cols):
        grid_rows = grid_cols - 1
    else:
        grid_rows = grid_cols
    #assuming images are same size, get shape
    l, w = ordered_projections[0].shape
    #add blank images to pack in grid
    while len(ordered_projections) < grid_rows*grid_cols:
        ordered_projections.append(np.zeros((l, w)))
        colors_2D.append((0., 0., 0.))
        flat_clusters.append('')
    f = Figure()
    grid = ImageGrid(f, 111, #similar to subplot(111)
                     nrows_ncols=(grid_rows, grid_cols), #creates grid of axes
                     axes_pad=0.05) #pad between axes in inch
    lw = 1.75
    text_box_size = 5
    props = dict(boxstyle='round', facecolor='white')
    for i, (ax, im) in enumerate(zip(grid, ordered_projections)):
        ax.imshow(im, cmap='gray')
        # Color the tile border by cluster color.
        for side, spine in ax.spines.items():
            spine.set_color(colors_2D[i])
            spine.set_linewidth(lw)
        ax.get_yaxis().set_ticks([])
        ax.get_xaxis().set_ticks([])
        # Class-average index in the tile corner.
        text = str(flat_clusters[i])
        ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)
    # Show the figure in a new toplevel window with its own toolbar.
    newWindow = tk.Toplevel()
    newWindow.grid_rowconfigure(0, weight=1)
    newWindow.grid_columnconfigure(0, weight=1)
    #PLOT FRAME
    plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)
    plotFrame.grid(row=0, column=0, sticky='nsew')
    canvas = FigureCanvasTkAgg(f, plotFrame)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    canvas.figure.tight_layout()
    #TOOLBAR FRAME
    toolbarFrame = ttk.Frame(newWindow, width=600, height=100)
    toolbarFrame.grid(row=1, column=0, sticky='nsew')
    toolbarFrame.grid_propagate(0)
    toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)
    toolbar.update()
def plot_projections(self, p1, p2, frame):
    """Show class averages ``p1``/``p2`` rotated to their optimal angles,
    with their 1D line projections underneath, embedded in ``frame``.
    """
    if p1 == p2:
        self.show_dif_class_msg()
        return
    projection1 = extract_2D[p1]
    projection2 = extract_2D[p2]
    # Optimal alignment angles previously computed for this pair.
    angle1 = complete_scores[p1, p2][0]
    angle2 = complete_scores[p1, p2][1]
    ref = transform.rotate(projection1, angle1, resize=True)
    comp = transform.rotate(projection2, angle2, resize=True)
    ref_square, comp_square = make_equal_square_images(ref, comp)
    # 1D projections: column sums of pixel intensity.
    ref_intensity = ref_square.sum(axis=0)
    comp_intensity = comp_square.sum(axis=0)
    # Shared y-range so the two line plots are directly comparable.
    y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
    y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
    f = Figure(figsize=(4,4))
    spec = gridspec.GridSpec(ncols=2, nrows=2, figure=f)
    tl = f.add_subplot(spec[0, 0])
    tr = f.add_subplot(spec[0, 1])
    bl = f.add_subplot(spec[1, 0])
    br = f.add_subplot(spec[1, 1])
    # PROJECTION_1: 2D image (top-left) and 1D line projection (bottom-left).
    tl.imshow(ref_square, cmap=plt.get_cmap('gray'), aspect='equal')
    tl.axis('off')
    bl.plot(ref_intensity, color='black')
    bl.xaxis.set_visible(False)
    bl.yaxis.set_visible(False)
    bl.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
    bl.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.5, color='deepskyblue')
    # PROJECTION_2: 2D image (top-right) and 1D line projection (bottom-right).
    tr.imshow(comp_square, cmap=plt.get_cmap('gray'), aspect='equal')
    tr.axis('off')
    br.plot(comp_intensity, color='black')
    br.xaxis.set_visible(False)
    br.yaxis.set_visible(False)
    br.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
    br.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.5, color='yellow')
    # Square up both line plots.
    asp = np.diff(bl.get_xlim())[0] / np.diff(bl.get_ylim())[0]
    bl.set_aspect(asp)
    asp1 = np.diff(br.get_xlim())[0] / np.diff(br.get_ylim())[0]
    # BUG FIX: ``asp1`` was computed but ``asp`` was applied to ``br``.
    br.set_aspect(asp1)
    f.tight_layout()
    # Replace any previously embedded canvas/toolbar before re-drawing.
    if self.projcanvas:
        self.projcanvas.get_tk_widget().destroy()
        self.projtoolbar.destroy()
    self.projcanvas = FigureCanvasTkAgg(f, frame)
    self.projcanvas.draw()
    self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
    self.projtoolbar.update()
def overlay_lines(self, p1, p2, FT, frame):
    """overlays line projections at optimum angle between two class averages"""
    if p1 == p2:
        self.show_dif_class_msg()
    else:
        # Optimal alignment angles previously computed for this pair.
        a1 = complete_scores[p1, p2][0]
        a2 = complete_scores[p1, p2][1]
        projection1 = make_1D(extract_2D[p1], a1)
        projection2 = make_1D(extract_2D[p2], a2)
        if FT:
            # Frequency-space comparison: zero-pad both line projections
            # to a common length (module-level ``shape``) and plot the
            # amplitude spectra of their real FFTs.
            pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))
            pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))
            A = abs(np.fft.rfft(pad_p1))
            B = abs(np.fft.rfft(pad_p2))
            f = Figure(figsize=(8,4))
            ax = f.add_subplot(111)
            ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)
            ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)
            ax.get_xaxis().set_ticks([])
            ax.set_xlabel('frequency component')
            ax.set_ylabel('Amplitude')
        else:
            # Real-space overlay: also try the 180-degree flip of
            # projection2 and keep whichever slide alignment scores better.
            a2_flip = complete_scores[p1, p2][1] + 180
            projection2_flip = make_1D(extract_2D[p2], a2_flip)
            score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum
            score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped
            if score_default <= score_flip:
                ref_intensity, comp_intensity = r, c
            else:
                ref_intensity, comp_intensity = r_flip, c_flip
            f = Figure(figsize=(8,4))
            ax = f.add_subplot(111)
            x_axis_max = len(ref_intensity)  # NOTE(review): unused
            y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))
            y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))
            ax.plot(ref_intensity, color='black')
            ax.plot(comp_intensity, color='black')
            ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')
            ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')
            ax.set_ylabel('Intensity')
            ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])
            ax.xaxis.set_visible(False)
        f.tight_layout()
        # Replace any previously embedded canvas/toolbar before re-drawing.
        if self.projcanvas:
            self.projcanvas.get_tk_widget().destroy()
            self.projtoolbar.destroy()
        self.projcanvas = FigureCanvasTkAgg(f, frame)
        self.projcanvas.draw()
        self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)
        self.projtoolbar.update()
def write_star_files(self, star_input, outpath):
    """split star file into new star files based on clusters"""
    with open(star_input, 'r') as f:
        table = parse_star(f)
    # Subset of the star table per non-empty cluster.
    cluster_star = {}
    for cluster, nodes in clusters.items():
        if nodes:
            #convert to str to match df
            #add 1 to match RELION indexing
            avgs = [str(node+1) for node in nodes]
            subset = table[table['ClassNumber'].isin(avgs)]
            cluster_star[cluster] = subset
    # One star file per cluster.
    for cluster, table in cluster_star.items():
        with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:
            #write the star file
            print('data_', file=f)
            print('loop_', file=f)
            for i, name in enumerate(table.columns):
                print('_rln' + name + ' #' + str(i+1), file=f)
            table.to_csv(f, sep='\t', index=False, header=False)
    # Plain-text summary of cluster membership.
    with open(outpath+'/slicem_clusters.txt', 'w') as f:
        for cluster, averages in clusters.items():
            f.write(str(cluster) + '\t' + str(averages) + '\n')
    print('star files written!')
def write_edge_list(self, network, outpath):
    """Write the scored edge list; for top_n also list unclustered singles."""
    with open(outpath+'/slicem_edge_list.txt', 'w') as f:
        f.write('projection_1'+'\t'+'projection_2'+'\t'+'score'+'\n')
        for t in flat:
            f.write(str(t[0])+'\t'+str(t[1])+'\t'+str(t[2])+'\n')
        # Singles have no edges; list them on their own lines.
        if network == 'top_n':
            if clusters['singles']:
                for single in clusters['singles']:
                    f.write(str(single)+'\n')
    print('edge list written!')
#Utility functions from main script to make GUI standalone
def extract_class_avg(avg):
    """fit in minimal bounding box"""
    # Zero out negative densities, dilate, then label connected regions.
    image = avg.copy()
    image[image < 0] = 0
    struct = np.ones((2, 2), dtype=bool)
    dilate = ndi.binary_dilation(image, struct)
    labeled = measure.label(dilate, connectivity=2)
    rprops = measure.regionprops(labeled, image, cache=False)
    if len(rprops) == 1:
        select_region = 0
    else:
        img_y, img_x = image.shape
        if labeled[int(img_y/2), int(img_x/2)] != 0: # Check for central region
            select_region = labeled[int(img_y/2), int(img_x/2)] - 1 # For index
        else:
            # No region covers the center: fall back to the region whose
            # weighted centroid lies closest to the image center.
            distances = [
                (i, np.linalg.norm(np.array((img_y/2, img_x/2)) - np.array(r.weighted_centroid)))
                for i, r in enumerate(rprops)
            ]
            select_region = min(distances, key=lambda x: x[1])[0] # Pick first closest region
    # Crop to the bounding box of the chosen region.
    y_min, x_min, y_max, x_max = [p for p in rprops[select_region].bbox]
    return image[y_min:y_max, x_min:x_max]
def nearest_neighbors(neighbors):
    """group k best scores for each class average to construct graph

    Converts the raw scores of each class average to z-scores and keeps
    the ``neighbors`` lowest per average. Uses module-level state:
    ``num_class_avg``, ``drop``, ``scores_update``.
    """
    projection_knn = {}
    order_scores = {avg: [] for avg in range(num_class_avg)}
    for dropped in drop:
        order_scores.pop(dropped, None)
    # Each entry: [projection_2, angle_1, angle_2, score]
    for (p1, p2), (a1, a2, score) in scores_update.items():
        order_scores[p1].append([p2, a1, a2, score])
    # Z-score per class average, replacing the raw score in place.
    for scores in order_scores.values():
        raw = [entry[3] for entry in scores]
        mean = np.mean(raw)
        std = np.std(raw)
        for entry in scores:
            entry[3] = (entry[3] - mean) / std
    # Keep the k best (lowest z-score) matches per class average.
    for avg, scores in order_scores.items():
        projection_knn[avg] = sorted(scores, key=lambda entry: entry[3])[:neighbors]
    return projection_knn
def remove_outliers(clusters):
    """
    Use median absolute deviation to remove outliers.

    Modified z-score method of Iglewicz and Hoaglin (1993): z = 0.6745 *
    (x - median) / MAD, flagging |z| > 3.5.

    Mutates ``clusters`` in place, adding an ``"outliers"`` entry listing
    class averages whose total intensity deviates from their cluster.
    """
    # Total pixel intensity per class average, grouped by cluster.
    # Singleton clusters cannot have outliers and are skipped.
    pixel_sums = {}
    outliers = []
    for cluster, nodes in clusters.items():
        if len(nodes) > 1:
            pixel_sums[cluster] = []
            for node in nodes:
                pixel_sums[cluster].append(sum(sum(extract_2D[node])))
    for cluster, psums in pixel_sums.items():
        med = np.median(psums)
        m_psums = [abs(x - med) for x in psums]
        mad = np.median(m_psums)
        if mad == 0:
            # BUG FIX: this branch was the bare expression ``next`` -- a
            # no-op reference to the builtin; ``continue`` was intended.
            continue
        for i, proj in enumerate(psums):
            # Modified z-score; |z| > 3.5 flags an outlier.
            z = 0.6745*(proj - med)/mad
            if abs(z) > 3.5:
                outliers.append((cluster, clusters[cluster][i]))
    clusters["outliers"] = [o[1] for o in outliers]
| |
"http://127.0.0.1:9200")
error.append(error6)
h, error7 = a.process_action(0, "schedule-add-client", {'--uuid':f[9][1], '--client_id':g[5][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error7)
x, error8 = a.process_action(0, "schedule-remove-client", {'--uuid':f[9][1], '--client_id':g[5][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
error.append(error8)
return x, error
def process_client_remove_schedule(self):
    """
    Tests client-remove-schedule action.

    Builds the prerequisite fixtures (tenant, local system, clients,
    schedule server, schedules), attaches a schedule to a client, then
    exercises client-remove-schedule.

    :return: returns any data and a list of any errors
    """
    error = []
    a = hemlock.Hemlock()
    # Dedicated test MySQL server; credentials here are CI fixtures.
    m_server = self.connect_mysql(0, "localhost", "travis", "password", "<PASSWORD>")
    b, error1 = a.process_action(0, "tenant-create", {'--name':'tenant1'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error1)
    c, error2 = a.process_action(0, "register-local-system", {'--name':'local-system1', '--data_type':'data-type1', '--description': 'description1', '--tenant_id':b[2][1], '--hostname':'hostname1', '--endpoint':'http://endpoint.com/', '--poc_name':'poc-name1', '--poc_email':'p<EMAIL>'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error2)
    d, error3 = a.process_action(0, "client-store", {'--name':'client1', '--type':'mysql', '--system_id':c[9][1], '--credential_file':'hemlock/clients/mysql_creds_sample'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error3)
    e, error4 = a.process_action(0, "schedule-server-create", {'--name':'asdf'}, m_server, "localhost", "hemlock", "hemlock", "password", 1, "http://127.0.0.1:9200")
    error.append(error4)
    f, error5 = a.process_action(0, "client-schedule", {'--name':'asdf', '--minute':'1', '--hour':'1', '--day_of_month':'1', '--month':'1', '--day_of_week':'1', '--client_id':d[5][1], '--schedule_server_id':e[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 1, "http://127.0.0.1:9200")
    error.append(error5)
    g, error6 = a.process_action(0, "client-store", {'--name':'client1', '--type':'mysql', '--system_id':c[9][1], '--credential_file':'hemlock/clients/mysql_creds_sample'}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error6)
    h, error7 = a.process_action(0, "client-schedule", {'--name':'asdf', '--minute':'1', '--hour':'1', '--day_of_month':'1', '--month':'1', '--day_of_week':'1', '--client_id':g[5][1], '--schedule_server_id':e[2][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error7)
    # Attach and then detach the schedule -- the action under test.
    i, error8 = a.process_action(0, "client-add-schedule", {'--uuid':d[5][1], '--schedule_id':h[9][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error8)
    x, error9 = a.process_action(0, "client-remove-schedule", {'--uuid':d[5][1], '--schedule_id':h[9][1]}, m_server, "localhost", "hemlock", "hemlock", "password", 0, "http://127.0.0.1:9200")
    error.append(error9)
    return x, error
def process_hemlock_scheduler(self):
    """
    Tests hemlock_scheduler.py.
    """
    # Fake CLI arguments consumed by the scheduler during init.
    sys.argv = ["", "hemlock_creds", "asdf"]
    a = Hemlock_Scheduler()
    a.init_schedule()
    a.check_schedules()
def process_hemlock_scheduler2(self):
    """
    Tests hemlock_scheduler.py.

    Exercises ``job_work`` with server credentials from the sample file.
    """
    # Fake CLI arguments consumed during init.
    sys.argv = ["", "hemlock_creds", "asdf"]
    b = Hemlock_Base()
    b.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = b.get_creds(0, "hemlock/clients/mysql_creds_sample")
    sys.argv = ["", "hemlock_creds", "asdf"]
    a = Hemlock_Scheduler()
    a.job_work(server_dict, "foo")
def connect_mysql(self, debug, server, user, pw, db):
    """
    Makes a connection to the test Hemlock MySQL server.
    :return: returns an instance of the MySQL connection
    """
    return hemlock.Hemlock().mysql_server(debug, server, user, pw, db)
def process_hemlock_process_args(self):
    """
    Tests hemlock process_args.

    NOTE(review): unlike siblings, this helper returns None; ``x`` and
    ``error`` are assigned but never returned.
    """
    error = []
    a = hemlock.Hemlock()
    x = a.process_args(0, ["client"])
def process_base_args1(self):
    """
    Tests hemlock_base without args.

    NOTE(review): returns None; ``x`` and ``error`` are unused.
    """
    error = []
    a = Hemlock_Base()
    x = a.process_args(0, [])
def process_base_args2(self):
    """
    Tests hemlock_base with args.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    # process_args yields (uuid, client, splits); note '--splits' is passed
    # as an int here, hence the str() below.
    a, b, c = a.process_args(0, ['--uuid', 'asdf', '--client', 'asdf', '--splits',10])
    x = a+b+str(c)
    return x, error
def process_base_send_data(self):
    """
    Tests hemlock_base send_data.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    # Flag 1 selects the non-couchbase backend (see sibling couchbase test).
    h_server = a.connect_server(0, server_dict, 1)
    x = a.send_data(0, [[]], [], h_server, "asdf", 1)
    return x, error
def process_base_connect_server_couchbase(self):
    """
    Tests hemlock_base connect_server with couchbase.

    NOTE(review): returns None; ``x``, ``error`` and ``h_server`` are unused.
    """
    error = []
    x = ""
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    # Flag 0 selects the couchbase backend.
    h_server = a.connect_server(0, server_dict, 0)
def process_hfs_old_process_files(self):
    """
    Tests hfs_old process_files.
    :return: returns any data and a list of any errors
    """
    error = []
    x = ""
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the legacy filesystem client.
    a = hfs_old()
    a.process_files(0, "docs/_build/", h_server, "asdf", 0)
    return x, error
def process_hfs_connect_client(self):
    """
    Tests hfs connect_client.

    NOTE(review): returns None; the first ``x`` is immediately overwritten
    and neither is returned.
    """
    error = []
    a = hfs()
    x = a.connect_client(0, {'FILE_PATH':'docs/_build/'})
    x = a.connect_client(0, {})
def process_hfs_get_data(self):
    """
    Tests hfs get_data.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/fs_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the filesystem client.
    a = hfs()
    c_server = a.connect_client(0, {'FILE_PATH':'docs/_build/'})
    x = a.get_data(0, client_dict, c_server, h_server, "asdf", 0)
    return x, error
def process_hfs_format_lists(self):
    """
    Tests hfs format_lists.
    :return: returns any data and a list of any errors
    """
    error = []
    x = ""
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/fs_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the filesystem client.
    a = hfs()
    a.format_lists(0, ["'[0]'"], h_server, "asdf", 0)
    return x, error
def process_hstream_odd_start(self):
    """
    Tests hstream_odd start.
    :return: returns any data and a list of any errors
    """
    error = []
    x = ""
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/stream_odd_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the odd-stream client.
    a = HStream_Odd()
    a.flag = 0
    a.connect_client(0, client_dict, h_server, "asdf", 0)
    a.start(0, "localhost", 50000, h_server, "asdf", 0, 0)
    # ``handle`` is a module-level function exercised with bogus values.
    handle(0, "bogus-conn", "bogus-address", h_server, "asdf", 0, 0)
    return x, error
def process_hmysql_connect_client(self):
    """
    Tests hmysql connect_client.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the MySQL client.
    a = HMysql()
    c_server = a.connect_client(0, client_dict)
    x = a.get_data(0, client_dict, c_server, h_server, "asdf", 0)
    return x, error
def process_hmysql_connect_client2(self):
    """
    Tests hmysql connect_client.

    Variant of process_hmysql_connect_client with an explicit table.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    client_dict['MYSQL_TABLE'] = "tenants"
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the MySQL client.
    a = HMysql()
    c_server = a.connect_client(0, client_dict)
    x = a.get_data(0, client_dict, c_server, h_server, "asdf", 0)
    return x, error
def process_hredis_connect_client(self):
    """
    Tests hredis connect_client.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/redis_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the Redis client.
    a = HRedis()
    c_server = a.connect_client(0, client_dict)
    x = a.get_data(0, client_dict, c_server, h_server, "asdf", 0)
    return x, error
def process_hmongo_connect_client(self):
    """
    Tests hmongo connect_client.
    :return: returns any data and a list of any errors
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mongo_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the Mongo client.
    a = HMongo()
    c_server = a.connect_client(0, client_dict)
    x = a.get_data(0, client_dict, c_server, h_server, "asdf", 0)
    return x, error
def process_hrest_connect_client(self):
    """
    Tests hrest connect_client.

    NOTE(review): returns None; ``error`` is unused.
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/rest_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the REST client.
    a = HRest()
    a.connect_client(0, client_dict)
def process_hrest_get_data(self):
    """
    Tests hrest get_data.

    NOTE(review): returns None; ``error`` is unused. The client connection
    argument is intentionally a bogus placeholder.
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/rest_creds_sample")
    h_server = a.connect_server(0, server_dict, 1)
    # ``a`` is deliberately rebound to the REST client.
    a = HRest()
    a.get_data(0, client_dict, "bogus", h_server, "asdf", 0)
def process_hemlock_base_update_hemlock(self):
    """
    Tests hemlock_base update_hemlock.

    NOTE(review): returns None; ``error`` and ``client_dict`` are unused.
    """
    error = []
    a = Hemlock_Base()
    a.SERVER_CREDS_FILE = "hemlock/hemlock_creds_sample"
    client_dict, server_dict = a.get_creds(0, "hemlock/clients/mysql_creds_sample")
    a.update_hemlock(0, "asdf", server_dict)
def process_hemlock_runner_mysql_server(self):
    """
    Tests hemlock_runner mysql_server.
    :return: returns any data and a list of any errors
    """
    error = []
    x = ""
    a = Hemlock_Runner()
    # CI fixture credentials.
    x = a.mysql_server(0, "localhost", "travis", "password", "<PASSWORD>")
    return x, error
def process_hemlock_runner_mysql_server2(self):
    """
    Tests hemlock_runner mysql_server.

    Failure-path variant with a bad password.
    NOTE(review): unlike its sibling, nothing is returned here.
    """
    error = []
    x = ""
    a = Hemlock_Runner()
    x = a.mysql_server(0, "localhost", "travis", "bad_password", "<PASSWORD>")
# call tests
def test_instanciate(self):
"""
Calls the test function for instanciation.
"""
a = hemlock.Hemlock()
a.check_args(["tenant-list"], [], {})
a.check_args(["tenant-list"], ["tenant-list"], {})
a.client_add_schedule([], {})
a.client_get([], {})
a.client_list([], {})
a.client_purge([], {})
a.client_remove_schedule([], {})
a.client_run([], {})
a.client_schedule([], {})
a.client_schedules_list([], {})
a.client_store([], {})
a.client_systems_list([], {})
a.deregister_local_system([], {})
a.deregister_remote_system([], {})
a.hemlock_server_store([], {})
a.list_all([], {})
a.query_data([], {})
a.register_local_system([], {})
a.register_remote_system([], {})
a.role_create([], {})
a.role_delete([], {})
a.role_get([], {})
a.role_list([], {})
a.role_users_list([], {})
a.schedule_add_client([], {})
a.schedule_clients_list([], {})
a.schedule_delete([], {})
a.schedule_get([], {})
a.schedule_list([], {})
a.schedule_remove_client([], {})
a.schedule_change_server([], {})
a.schedule_server_create([], {})
a.schedule_server_delete([], {})
a.schedule_server_get([], {})
a.schedule_server_list([], {})
a.start_scheduler([], {})
a.system_add_tenant([], {})
a.system_clients_list([], {})
a.system_get([], {})
a.system_list([], {})
a.system_remove_tenant([], {})
a.system_tenants_list([], {})
a.tenant_create([], {})
a.tenant_delete([], {})
a.tenant_get([], {})
a.tenant_list([], {})
a.tenant_systems_list([], {})
a.tenant_users_list([], {})
a.user_add_role([], {})
a.user_add_tenant([], {})
a.user_create([], {})
a.user_delete([], {})
a.user_get([], {})
a.user_list([], {})
a.user_remove_role([], | |
# test/unit/neural_networks/pytorch/test_EncDecDyn.py
#
# Copyright (c) 2021 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
import unittest
import copy
import os
import torch
import jsonpickle
from idiaptts.misc.utils import makedirs_safe
import idiaptts.src.neural_networks.pytorch.models.rnn_dyn as rnn_dyn
import idiaptts.src.neural_networks.pytorch.models.enc_dec_dyn as enc_dec_dyn
class TestEncDecDyn(unittest.TestCase):
out_dir = None
@classmethod
def setUpClass(cls):
    """Create a per-test-class output directory named after the class."""
    # The original used type(cls()).__name__, which needlessly instantiates
    # the TestCase just to read its class name; cls.__name__ is equivalent
    # and side-effect free.
    cls.out_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               cls.__name__)
    makedirs_safe(cls.out_dir)  # Create class name directory.
@classmethod
def tearDownClass(cls):
    """Remove the per-class output directory; it must be empty by now."""
    class_dir = cls.out_dir
    os.rmdir(class_dir)
def _get_encoder_config(self, out_dim):
    """Phoneme encoder: embedding -> conv (ReLU) -> batch norm -> linear."""
    layers = [
        rnn_dyn.Config.LayerConfig(layer_type="Embedding",
                                   num_embeddings=2, embedding_dim=4),
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=6,
                                   kernel_size=3, nonlin="ReLU", padding=1),
        rnn_dyn.Config.LayerConfig(layer_type="BatchNorm1d"),
        rnn_dyn.Config.LayerConfig(layer_type="Linear", out_dim=out_dim),
    ]
    encoder_net = rnn_dyn.Config(in_dim=1, layer_configs=layers)
    return enc_dec_dyn.Config.ModuleConfig(
        name="Encoder",
        config=encoder_net,
        input_names=["phonemes"],
        output_names=["phoneme_embeddings"],
        process_group=0)
def _get_encoder_vae_pool_last_config(self, in_dim, out_dim):
    """VAE acoustic encoder that pools the last frame before sampling."""
    layers = [
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=2,
                                   num_layers=1, kernel_size=3, stride=2,
                                   padding=1),
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=4,
                                   num_layers=2, kernel_size=3, stride=2,
                                   padding=1),
        rnn_dyn.Config.LayerConfig(layer_type="GRU", out_dim=8),
        rnn_dyn.Config.LayerConfig(layer_type="PoolLast", batch_first=True),
        rnn_dyn.Config.LayerConfig(layer_type="VAE", out_dim=out_dim),
    ]
    return enc_dec_dyn.Config.ModuleConfig(
        name="EncoderVAE",
        config=rnn_dyn.Config(in_dim=in_dim, layer_configs=layers),
        input_names=["acoustic_features"],
        output_names=["emb_z", "emb_mu", "emb_logvar"],
        process_group=0)
def _get_encoder_vae_config(self, in_dim, out_dim):
    """Frame-wise VAE acoustic encoder (no pooling, stride 1)."""
    layers = [
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=2,
                                   num_layers=1, kernel_size=3, padding=1),
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=4,
                                   num_layers=2, kernel_size=3, padding=1),
        rnn_dyn.Config.LayerConfig(layer_type="GRU", out_dim=8),
        rnn_dyn.Config.LayerConfig(layer_type="VAE", out_dim=out_dim),
    ]
    return enc_dec_dyn.Config.ModuleConfig(
        name="EncoderVAE",
        config=rnn_dyn.Config(in_dim=in_dim, layer_configs=layers),
        input_names=["acoustic_features"],
        output_names=["emb_z", "emb_mu", "emb_logvar"],
        process_group=0)
def _get_encoder_embedding_config(self, out_dim):
    """Plain embedding lookup producing emb_z from an index input."""
    embedding_layer = rnn_dyn.Config.LayerConfig(
        layer_type='Embedding', num_embeddings=2, embedding_dim=out_dim)
    return enc_dec_dyn.Config.ModuleConfig(
        name="Embedding",
        config=rnn_dyn.Config(in_dim=1, layer_configs=[embedding_layer]),
        input_names=['emb_idx'],
        output_names=["emb_z"],
        process_group=0)
def _get_fixed_attention_config(self):
    """Attention stage driven entirely by a ground-truth attention matrix."""
    return enc_dec_dyn.Config.DecoderConfig(
        name="FixedAttention",
        attention_config=enc_dec_dyn.FIXED_ATTENTION,
        attention_args={
            enc_dec_dyn.ATTENTION_GROUND_TRUTH: "attention_matrix"},
        input_names="phoneme_embeddings",
        output_names="upsampled_phoneme_embeddings",
        n_frames_per_step=1,
        process_group=1)
def _get_fixed_attention_decoder_config(self, audio_encoder_dim, in_dim,
                                        out_dim, n_frames_per_step,
                                        p_teacher_forcing):
    """Auto-regressive decoder with fixed (ground-truth) attention."""
    # Main recurrent decoder body.
    main_net = rnn_dyn.Config(
        in_dim=in_dim,
        layer_configs=[
            rnn_dyn.Config.LayerConfig(layer_type="FC", out_dim=8,
                                       nonlin="RELU"),
            rnn_dyn.Config.LayerConfig(layer_type="LSTM", out_dim=4),
        ])
    # Pre-net encodes the previous acoustic frame(s) fed back in.
    pre_net = rnn_dyn.Config(
        in_dim=out_dim,
        layer_configs=[
            rnn_dyn.Config.LayerConfig(layer_type="linear",
                                       out_dim=audio_encoder_dim,
                                       nonlin="relu", num_layers=2),
        ])
    # Projection back to the acoustic feature space (one step may emit
    # several frames, hence out_dim * n_frames_per_step).
    projection = enc_dec_dyn.Config.ProjectionConfig(
        name="AcousticFeaturesProjector",
        config=rnn_dyn.Config(
            in_dim=4,
            layer_configs=[rnn_dyn.Config.LayerConfig(
                layer_type="FC", out_dim=out_dim * n_frames_per_step)]),
        output_names=["pred_intermediate_acoustic_features"],
        out_dim=out_dim,
        is_autoregressive_input=True)
    return enc_dec_dyn.Config.DecoderConfig(
        name="Decoder",
        attention_config=enc_dec_dyn.FIXED_ATTENTION,
        attention_args={
            enc_dec_dyn.ATTENTION_GROUND_TRUTH: "attention_matrix"},
        teacher_forcing_input_names=["acoustic_features"],
        config=main_net,
        pre_net_config=pre_net,
        input_names=["phoneme_embeddings", "emb_z"],
        n_frames_per_step=n_frames_per_step,
        p_teacher_forcing=p_teacher_forcing,
        projection_configs=[projection],
        process_group=1)
def _get_parallel_decoder_config(self, in_dim, out_dim):
    """Non-autoregressive decoder run on upsampled phoneme embeddings."""
    net = rnn_dyn.Config(
        in_dim=in_dim,
        layer_configs=[
            rnn_dyn.Config.LayerConfig(layer_type="FC", out_dim=8,
                                       nonlin="RELU", dropout=0.1),
            rnn_dyn.Config.LayerConfig(layer_type="LSTM", out_dim=out_dim,
                                       dropout=0.1),
        ])
    return enc_dec_dyn.Config.ModuleConfig(
        name="ParallelDecoder",
        config=net,
        input_names=["upsampled_phoneme_embeddings", "emb_z"],
        output_names=["pred_acoustic_features"],
        process_group=2)
def _get_postnet_config(self, out_dim):
    """Post-filter over the decoder output: conv -> BN -> ReLU -> linear."""
    layers = [
        rnn_dyn.Config.LayerConfig(layer_type="Conv1d", out_dim=4,
                                   kernel_size=3),
        rnn_dyn.Config.LayerConfig(layer_type="BatchNorm1d"),
        rnn_dyn.Config.LayerConfig(layer_type="ReLU"),
        rnn_dyn.Config.LayerConfig(layer_type="Linear", out_dim=out_dim),
    ]
    return enc_dec_dyn.Config.ModuleConfig(
        name="Postnet",
        config=rnn_dyn.Config(in_dim=out_dim, layer_configs=layers),
        input_names=["pred_intermediate_acoustic_features"],
        output_names=["pred_acoustic_features"],
        process_group=2)
def test_fixed_attention_auto_regressive_b1(self):
    """Smoke test of the auto-regressive decoder with batch size 1."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6
    n_frames_per_step = 5
    p_teacher_forcing = 0.5
    decoder_dim = 15
    modules = [
        self._get_encoder_config(encoder_dim),
        self._get_encoder_vae_pool_last_config(decoder_dim, vae_dim),
        self._get_fixed_attention_decoder_config(
            audio_encoder_dim, encoder_dim + audio_encoder_dim + vae_dim,
            decoder_dim, n_frames_per_step, p_teacher_forcing),
        self._get_postnet_config(decoder_dim),
    ]
    model = enc_dec_dyn.Config(modules=modules).create_model()
    batch_size = 1
    phoneme_seq_length = torch.tensor((10,), dtype=torch.long)
    phoneme_max_length = torch.tensor(10, dtype=torch.long)
    seq_length = torch.tensor((100,), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    test_input = {
        "phonemes": torch.ones(
            [batch_size, phoneme_seq_length.max(), 1]).long(),
        "acoustic_features": torch.ones(
            [batch_size, seq_length.max(), decoder_dim]),
        "attention_matrix": torch.zeros(
            (batch_size, max_length, phoneme_max_length)),
    }
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    expected_shape = torch.Size([batch_size, seq_length.max(), decoder_dim])
    self.assertEqual(expected_shape,
                     output["pred_acoustic_features"].shape)
def test_fixed_attention_auto_regressive(self):
    """Batch-2 auto-regressive run: checks output shapes, the length
    bookkeeping the model writes back into the passed dicts, the parameter
    count, and finally that gradients flow end to end."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6
    n_frames_per_step = 5
    p_teacher_forcing = 0.5
    decoder_dim = 15
    model_config = enc_dec_dyn.Config(
        modules=[
            self._get_encoder_config(encoder_dim),
            self._get_encoder_vae_pool_last_config(decoder_dim, vae_dim),
            self._get_fixed_attention_decoder_config(
                audio_encoder_dim, encoder_dim + audio_encoder_dim + vae_dim,
                decoder_dim, n_frames_per_step, p_teacher_forcing),
            self._get_postnet_config(decoder_dim)
        ]
    )
    model = model_config.create_model()
    # Two sequences of different lengths to exercise padding/masking.
    phoneme_seq_length = torch.tensor((10, 12), dtype=torch.long)
    phoneme_max_length = torch.tensor(12, dtype=torch.long)
    seq_length = torch.tensor((100, 75), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    batch_size = 2
    test_input = {}
    test_input["phonemes"] = torch.ones(
        [batch_size, phoneme_seq_length.max(), 1]).long()
    test_input["acoustic_features"] = torch.ones(
        [batch_size, seq_length.max(), decoder_dim])
    test_input["attention_matrix"] = torch.zeros(
        (batch_size, max_length, phoneme_max_length))
    # NOTE: unlike the "batched" tests, no "attention_matrix" entry here.
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    # Shape checks for every intermediate exposed by the model.
    self.assertEqual(torch.Size([batch_size,
                                 phoneme_seq_length.max(),
                                 encoder_dim]),
                     output["phoneme_embeddings"].shape)
    self.assertEqual(torch.Size([batch_size, 1, vae_dim]),
                     output["emb_mu"].shape)
    # Attention is emitted once per decoder step (n_frames_per_step frames).
    self.assertEqual(torch.Size(
        [batch_size,
         seq_length.max() // n_frames_per_step, phoneme_seq_length.max()]),
        output["attention"].shape)
    self.assertEqual(torch.Size(
        [batch_size, seq_length.max(), decoder_dim]),
        output["pred_intermediate_acoustic_features"].shape)
    self.assertEqual(torch.Size(
        [batch_size, seq_length.max(), decoder_dim]),
        output["pred_acoustic_features"].shape)
    # The forward pass inserted length entries for its outputs into the
    # dicts passed in; verify they reflect the input lengths.
    # NOTE(review): .any() passes if a single element matches; .all()
    # would be the stricter check -- confirm which was intended.
    self.assertTrue(
        (phoneme_seq_length == seq_length_dict["phoneme_embeddings"]).any())
    self.assertTrue(
        (phoneme_max_length == max_length_dict["phoneme_embeddings"]).any())
    self.assertTrue(
        (torch.tensor((1, 1), dtype=torch.long) == max_length_dict["emb_mu"]).any())
    self.assertTrue(
        (seq_length == seq_length_dict["pred_intermediate_acoustic_features"]).any())
    self.assertTrue(
        (seq_length == seq_length_dict["pred_acoustic_features"]).any())
    # Parameter-count audit; each entry below counts named parameters.
    expected_params = 1 + 2 * 3  # Phoneme encoder: 1 Emb + 3 Conv (weight & bias)
    expected_params += 2 * 3 + 4 + 1  # Acoustic encoder: 3 Conv + GRU + VAE projection
    expected_params += 2 + 4  # Decoder: Linear + LSTM
    expected_params += 2 * 2  # Decoder pre-net: 2 Linear
    expected_params += 2  # Decoder projection: 1 Linear
    expected_params += 3 * 2  # Postnet: 1 Conv + 1 BatchNorm1d + 1 Linear
    self.assertEqual(expected_params, len([*model.named_parameters()]))
    # Ensure gradients can be computed through the whole stack.
    output["pred_acoustic_features"].sum().backward()
def test_fixed_attention_batched_b1(self):
    """Smoke test of the fully teacher-forced decoder with batch size 1."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6
    n_frames_per_step = 5
    p_teacher_forcing = 1
    decoder_dim = 15
    modules = [
        self._get_encoder_config(encoder_dim),
        self._get_encoder_vae_pool_last_config(decoder_dim, vae_dim),
        self._get_fixed_attention_decoder_config(
            audio_encoder_dim, encoder_dim + audio_encoder_dim + vae_dim,
            decoder_dim, n_frames_per_step, p_teacher_forcing),
        self._get_postnet_config(decoder_dim),
    ]
    model = enc_dec_dyn.Config(modules=modules).create_model()
    batch_size = 1
    phoneme_seq_length = torch.tensor((10,), dtype=torch.long)
    phoneme_max_length = torch.tensor(10, dtype=torch.long)
    seq_length = torch.tensor((100,), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    test_input = {
        "phonemes": torch.ones(
            [batch_size, phoneme_seq_length.max(), 1]).long(),
        "acoustic_features": torch.ones(
            [batch_size, seq_length.max(), decoder_dim]),
        "attention_matrix": torch.zeros(
            (batch_size, max_length, phoneme_max_length)),
    }
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length,
                       "attention_matrix": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length,
                       "attention_matrix": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    expected_shape = torch.Size([batch_size, seq_length.max(), decoder_dim])
    self.assertEqual(expected_shape,
                     output["pred_acoustic_features"].shape)
def test_fixed_attention_batched(self):
    """Batch-2 fully teacher-forced (p=1) run: checks output shapes, the
    length bookkeeping the model writes back into the passed dicts, the
    parameter count, and that gradients flow end to end."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6
    n_frames_per_step = 5
    p_teacher_forcing = 1
    decoder_dim = 15
    model_config = enc_dec_dyn.Config(
        modules=[
            self._get_encoder_config(encoder_dim),
            self._get_encoder_vae_pool_last_config(decoder_dim, vae_dim),
            self._get_fixed_attention_decoder_config(
                audio_encoder_dim, encoder_dim + audio_encoder_dim + vae_dim,
                decoder_dim, n_frames_per_step, p_teacher_forcing),
            self._get_postnet_config(decoder_dim)
        ]
    )
    model = model_config.create_model()
    # Two sequences of different lengths to exercise padding/masking.
    phoneme_seq_length = torch.tensor((10, 12), dtype=torch.long)
    phoneme_max_length = torch.tensor(12, dtype=torch.long)
    seq_length = torch.tensor((100, 75), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    batch_size = 2
    test_input = {}
    test_input["phonemes"] = torch.ones(
        [batch_size, phoneme_seq_length.max(), 1]).long()
    test_input["acoustic_features"] = torch.ones(
        [batch_size, seq_length.max(), decoder_dim])
    test_input["attention_matrix"] = torch.zeros(
        (batch_size, max_length, phoneme_max_length))
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length,
                       "attention_matrix": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length,
                       "attention_matrix": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    # Shape checks for the intermediates exposed by the model.
    self.assertEqual(
        torch.Size([batch_size, phoneme_seq_length.max(), encoder_dim]),
        output["phoneme_embeddings"].shape)
    self.assertEqual(torch.Size([batch_size, 1, vae_dim]),
                     output["emb_mu"].shape)
    self.assertEqual(torch.Size([batch_size, seq_length.max(), decoder_dim]),
                     output["pred_intermediate_acoustic_features"].shape)
    self.assertEqual(torch.Size([batch_size, seq_length.max(), decoder_dim]),
                     output["pred_acoustic_features"].shape)
    # The forward pass inserted length entries for its outputs into the
    # dicts passed in; verify they reflect the input lengths.
    # NOTE(review): .any() passes if a single element matches; .all()
    # would be the stricter check -- confirm which was intended.
    self.assertTrue(
        (phoneme_seq_length == seq_length_dict["phoneme_embeddings"]).any())
    self.assertTrue(
        (phoneme_max_length == max_length_dict["phoneme_embeddings"]).any())
    self.assertTrue(
        (torch.tensor((1, 1), dtype=torch.long) == max_length_dict["emb_mu"]).any())
    self.assertTrue(
        (seq_length == seq_length_dict["pred_intermediate_acoustic_features"]).any())
    self.assertTrue(
        (seq_length == seq_length_dict["pred_acoustic_features"]).any())
    # Parameter-count audit; each entry below counts named parameters.
    expected_params = 1 + 2 * 3  # Phoneme encoder: 1 Emb + 3 Conv (weight & bias)
    expected_params += 2 * 3 + 4 + 1  # Acoustic encoder: 3 Conv + GRU + VAE projection
    expected_params += 2 + 4  # Decoder: Linear + LSTM
    expected_params += 2 * 2  # Decoder pre-net: 2 Linear
    expected_params += 2  # Decoder projection: 1 Linear
    expected_params += 3 * 2  # Postnet: 1 Conv + 1 BatchNorm1d + 1 Linear
    self.assertEqual(expected_params, len([*model.named_parameters()]))
    # Ensure gradients can be computed through the whole stack.
    output["pred_acoustic_features"].sum().backward()
def test_fixed_attention_parallel_decoder_b1(self):
    """Smoke test of the parallel (non-autoregressive) decoder, batch 1."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6        # kept for parity with sibling tests (unused)
    n_frames_per_step = 5        # kept for parity with sibling tests (unused)
    p_teacher_forcing = 1        # kept for parity with sibling tests (unused)
    decoder_dim = 15
    modules = [
        self._get_encoder_config(encoder_dim),
        self._get_encoder_vae_config(decoder_dim, vae_dim),
        self._get_fixed_attention_config(),
        self._get_parallel_decoder_config(encoder_dim + vae_dim,
                                          decoder_dim),
    ]
    model = enc_dec_dyn.Config(modules=modules).create_model()
    batch_size = 1
    phoneme_seq_length = torch.tensor((10,), dtype=torch.long)
    phoneme_max_length = torch.tensor(10, dtype=torch.long)
    seq_length = torch.tensor((100,), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    test_input = {
        "phonemes": torch.ones(
            [batch_size, phoneme_seq_length.max(), 1]).long(),
        "acoustic_features": torch.ones(
            [batch_size, seq_length.max(), decoder_dim]),
        "attention_matrix": torch.zeros(
            (batch_size, max_length, phoneme_max_length)),
    }
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length,
                       "attention_matrix": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length,
                       "attention_matrix": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    expected_shape = torch.Size([batch_size, seq_length.max(), decoder_dim])
    self.assertEqual(expected_shape,
                     output["pred_acoustic_features"].shape)
def test_fixed_attention_parallel_decoder(self):
    """Batch-2 run of the parallel decoder behind fixed attention: checks
    output shapes, length bookkeeping, parameter count, and backprop."""
    encoder_dim = 12
    vae_dim = 6
    audio_encoder_dim = 6  # unused here; kept for parity with sibling tests
    n_frames_per_step = 5  # unused here; kept for parity with sibling tests
    p_teacher_forcing = 1  # unused here; kept for parity with sibling tests
    decoder_dim = 15
    model_config = enc_dec_dyn.Config(
        modules=[
            self._get_encoder_config(encoder_dim),
            self._get_encoder_vae_config(decoder_dim, vae_dim),
            self._get_fixed_attention_config(),
            self._get_parallel_decoder_config(encoder_dim + vae_dim,
                                              decoder_dim)
        ]
    )
    model = model_config.create_model()
    # Two sequences of different lengths to exercise padding/masking.
    phoneme_seq_length = torch.tensor((10, 12), dtype=torch.long)
    phoneme_max_length = torch.tensor(12, dtype=torch.long)
    seq_length = torch.tensor((100, 75), dtype=torch.long)
    max_length = torch.tensor(100, dtype=torch.long)
    batch_size = 2
    test_input = {}
    test_input["phonemes"] = torch.ones(
        [batch_size, phoneme_seq_length.max(), 1]).long()
    test_input["acoustic_features"] = torch.ones(
        [batch_size, seq_length.max(), decoder_dim])
    test_input["attention_matrix"] = torch.zeros(
        (batch_size, max_length, phoneme_max_length))
    seq_length_dict = {"phonemes": phoneme_seq_length,
                       "acoustic_features": seq_length,
                       "attention_matrix": seq_length}
    max_length_dict = {"phonemes": phoneme_max_length,
                       "acoustic_features": max_length,
                       "attention_matrix": max_length}
    model.init_hidden(batch_size)
    output = model(test_input, seq_length_dict, max_length_dict)
    self.assertEqual(
        torch.Size([batch_size, phoneme_seq_length.max(), encoder_dim]),
        output["phoneme_embeddings"].shape)
    # Frame-wise VAE: one mu per frame (contrast with PoolLast variants).
    self.assertEqual(
        torch.Size([batch_size, seq_length.max(), vae_dim]),
        output["emb_mu"].shape)
    self.assertEqual(
        torch.Size([batch_size, seq_length.max(), decoder_dim]),
        output["pred_acoustic_features"].shape)
    # NOTE(review): .any() passes if a single element matches; .all()
    # would be the stricter check -- confirm which was intended.
    self.assertTrue(
        (phoneme_seq_length == seq_length_dict["phoneme_embeddings"]).any())
    self.assertTrue(
        (phoneme_max_length == max_length_dict["phoneme_embeddings"]).any())
    # NOTE(review): compares per-item seq_length against the *max*-length
    # dict entry for emb_mu -- looks asymmetric; confirm intent.
    self.assertTrue((seq_length == max_length_dict["emb_mu"]).any())
    self.assertTrue(
        (seq_length == seq_length_dict["pred_acoustic_features"]).any())
    # Parameter-count audit; each entry below counts named parameters.
    expected_params = 1 + 2 * 3  # Phoneme encoder: 1 Emb + 3 Conv (weight & bias)
    expected_params += 2 * 3 + 4 + 1  # Acoustic encoder: 3 Conv + GRU + VAE projection
    expected_params += 2 + 4  # Parallel decoder: Linear + LSTM
    self.assertEqual(expected_params, len([*model.named_parameters()]))
    # Ensure gradients can be computed through the whole stack.
    output["pred_acoustic_features"].sum().backward()
def test_save_load(self):
encoder_dim = 12
vae_dim = 6
decoder_dim = 15
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
model_config = enc_dec_dyn.Config(
modules=[
| |
map.get('thumbnail')
if map.get('trashed_at') is not None:
self.trashed_at = map.get('trashed_at')
if map.get('type') is not None:
self.type = map.get('type')
if map.get('updated_at') is not None:
self.updated_at = map.get('updated_at')
if map.get('upload_id') is not None:
self.upload_id = map.get('upload_id')
if map.get('url') is not None:
self.url = map.get('url')
if map.get('user_meta') is not None:
self.user_meta = map.get('user_meta')
if map.get('video_media_metadata') is not None:
temp_model = VideoMediaResponse()
self.video_media_metadata = temp_model.from_map(map['video_media_metadata'])
if map.get('video_preview_metadata') is not None:
temp_model = VideoPreviewResponse()
self.video_preview_metadata = temp_model.from_map(map['video_preview_metadata'])
return self
class UpdateShareResponse(TeaModel):
    """
    Update share response.

    Attribute names double as the wire keys; share_policy holds nested
    SharePermissionPolicy models and is always serialized (possibly empty).
    """

    # Plain fields serialized before share_policy, in wire order.
    _FIELDS_BEFORE_POLICY = (
        'created_at', 'creator', 'description', 'domain_id', 'drive_id',
        'expiration', 'expired', 'owner', 'permissions', 'share_file_path',
        'share_id', 'share_name')
    # Plain fields serialized after share_policy.
    _FIELDS_AFTER_POLICY = ('status', 'updated_at')

    def __init__(self, created_at=None, creator=None, description=None, domain_id=None, drive_id=None,
                 expiration=None, expired=None, owner=None, permissions=None, share_file_path=None, share_id=None,
                 share_name=None, share_policy=None, status=None, updated_at=None):
        self.created_at = created_at  # type: str
        self.creator = creator  # type: str
        self.description = description  # type: str
        self.domain_id = domain_id  # type: str
        self.drive_id = drive_id  # type: str
        self.expiration = expiration  # type: str
        self.expired = expired  # type: bool
        self.owner = owner  # type: str
        self.permissions = permissions  # type: List[str]
        self.share_file_path = share_file_path  # type: str
        self.share_id = share_id  # type: str
        self.share_name = share_name  # type: str
        self.share_policy = share_policy  # type: List[SharePermissionPolicy]
        self.status = status  # type: str
        self.updated_at = updated_at  # type: str

    def validate(self):
        # Only the nested policy models carry their own validation.
        if self.share_policy:
            for policy in self.share_policy:
                if policy:
                    policy.validate()

    def to_map(self):
        result = {}
        for key in self._FIELDS_BEFORE_POLICY:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        # share_policy is always emitted, even when unset (as an empty list).
        result['share_policy'] = []
        if self.share_policy is not None:
            for policy in self.share_policy:
                result['share_policy'].append(policy.to_map() if policy else None)
        for key in self._FIELDS_AFTER_POLICY:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        for key in self._FIELDS_BEFORE_POLICY + self._FIELDS_AFTER_POLICY:
            if map.get(key) is not None:
                setattr(self, key, map.get(key))
        # share_policy is always reset, then rebuilt from nested models.
        self.share_policy = []
        if map.get('share_policy') is not None:
            for item in map.get('share_policy'):
                temp_model = SharePermissionPolicy()
                self.share_policy.append(temp_model.from_map(item))
        return self
class UploadPartInfo(TeaModel):
    """
    Describes one part of a multipart upload.
    """

    # Serialized field names, in wire order.
    _FIELDS = ('etag', 'part_number', 'part_size', 'upload_url')

    def __init__(self, etag=None, part_number=None, part_size=None, upload_url=None):
        self.etag = etag  # type: str
        self.part_number = part_number  # type: int
        self.part_size = part_size  # type: int
        self.upload_url = upload_url  # type: str

    def validate(self):
        # part_number: positive integer, capped at the 10000-part limit.
        if self.part_number is not None:
            self.validate_pattern(self.part_number, 'part_number', '[0-9]+')
            self.validate_maximum(self.part_number, 'part_number', 10000)
            self.validate_minimum(self.part_number, 'part_number', 1)
        # part_size: between 102400 bytes and 5368709120 bytes.
        if self.part_size is not None:
            self.validate_maximum(self.part_size, 'part_size', 5368709120)
            self.validate_minimum(self.part_size, 'part_size', 102400)

    def to_map(self):
        result = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        for key in self._FIELDS:
            if map.get(key) is not None:
                setattr(self, key, map.get(key))
        return self
class UrlInfo(TeaModel):
    """
    Download/preview URL bundle for a file.
    """

    # Serialized field names, in wire order.
    _FIELDS = ('download_url', 'thumbnail', 'url')

    def __init__(self, download_url=None, thumbnail=None, url=None):
        self.download_url = download_url  # type: str
        self.thumbnail = thumbnail  # type: str
        self.url = url  # type: str

    def validate(self):
        # No constraints on any of the URL fields.
        pass

    def to_map(self):
        result = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        for key in self._FIELDS:
            if map.get(key) is not None:
                setattr(self, key, map.get(key))
        return self
class UserAuthentication(TeaModel):
    """
    A user's authentication record.

    Note the mixed wire naming: every key is CamelCase except 'extra'.
    """

    # (attribute, wire key) pairs, in wire order.
    _FIELD_MAP = (
        ('authentication_type', 'AuthenticationType'),
        ('created_at', 'CreatedAt'),
        ('detail', 'Detail'),
        ('domain_id', 'DomainID'),
        ('identity', 'Identity'),
        ('last_login_time', 'LastLoginTime'),
        ('status', 'Status'),
        ('user_id', 'UserID'),
        ('extra', 'extra'),
    )

    def __init__(self, authentication_type=None, created_at=None, detail=None, domain_id=None, identity=None,
                 last_login_time=None, status=None, user_id=None, extra=None):
        # Authentication type.
        self.authentication_type = authentication_type  # type: str
        # Creation time.
        self.created_at = created_at  # type: int
        # Detail text.
        self.detail = detail  # type: str
        # Domain ID.
        self.domain_id = domain_id  # type: str
        # Unique identity of the credential.
        self.identity = identity  # type: str
        # Last login time.
        self.last_login_time = last_login_time  # type: int
        # Status.
        self.status = status  # type: str
        # User ID.
        self.user_id = user_id  # type: str
        # Extra info; when type is "mobile" this is the country code
        # (defaults to 86 when omitted).
        self.extra = extra  # type: str

    def validate(self):
        # Every field except `extra` is mandatory.
        for attr in ('authentication_type', 'created_at', 'detail',
                     'domain_id', 'identity', 'last_login_time', 'status',
                     'user_id'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        result = {}
        for attr, key in self._FIELD_MAP:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        for attr, key in self._FIELD_MAP:
            if map.get(key) is not None:
                setattr(self, attr, map.get(key))
        return self
class VerifyCodeRequest(TeaModel):
    """
    Request to verify an SMS code.
    """

    # Serialized field names, in wire order.
    _FIELDS = ('headers', 'app_id', 'phone_number', 'phone_region',
               'sms_code', 'sms_code_id', 'verify_type')

    def __init__(self, headers=None, app_id=None, phone_number=None, phone_region=None, sms_code=None,
                 sms_code_id=None, verify_type=None):
        self.headers = headers  # type: Dict[str, str]
        # App ID of the app being accessed.
        self.app_id = app_id  # type: str
        # Phone number.
        self.phone_number = phone_number  # type: str
        # Country code; defaults to 86, digits only (no leading +).
        self.phone_region = phone_region  # type: str
        # SMS verification code content.
        self.sms_code = sms_code  # type: str
        # SMS verification code ID.
        self.sms_code_id = sms_code_id  # type: str
        # Type of content being verified.
        self.verify_type = verify_type  # type: str

    def validate(self):
        # headers, phone_region and verify_type are optional.
        for attr in ('app_id', 'phone_number', 'sms_code', 'sms_code_id'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        result = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, map={}):
        for key in self._FIELDS:
            if map.get(key) is not None:
                setattr(self, key, map.get(key))
        return self
class VerifyCodeResponse(TeaModel):
    """
    Response of a verify-code request.
    """

    def __init__(self, state=None):
        # Temporary authorization token used to change the password.
        self.state = state  # type: str

    def validate(self):
        self.validate_required(self.state, 'state')

    def to_map(self):
        # Only one optional field; emit it when present.
        return {} if self.state is None else {'state': self.state}

    def from_map(self, map={}):
        state = map.get('state')
        if state is not None:
            self.state = state
        return self
class VideoMediaAudioStream(TeaModel):
"""
*
"""
def __init__(self, bit_rate=None, channel_layout=None, channels=None, code_name=None, duration=None,
sample_rate=None):
# bit_rate 音频比特率 单位:bps
self.bit_rate = bit_rate # type: str
# channel_layout 声道布局
self.channel_layout = channel_layout # type: str
# | |
<gh_stars>1-10
"""
element.py
lists and tools to deal with motifs (words).
pwms, pwm and element need to be merged into a single 'motif.py'...
Part of glbase.
"""
from . import config
import re, random, sys, os, string, numpy, copy
from . import utils
from .genelist import genelist as new_gl
from .errors import AssertionError
from .draw import draw
from numpy import zeros
# R=[AG], Y=[CT], K=[GT], M=[AC], S=[GC], W=[AT],
# [ACT] = h, [ACG] = v, [AGT] = d, [CGT] = b
# IUPAC degenerate-nucleotide codes mapped to equivalent regex character
# classes (lower-case DNA alphabet); used by motif.getRegEx() and
# motif._getMismatchRegEx() to turn a degenerate motif into a pattern.
regex_dict = {
    "a" : "a",
    "c" : "c",
    "g" : "g",
    "t" : "t",
    "r" : "[ag]", # twos
    "y" : "[ct]",
    "k" : "[gt]",
    "m" : "[ac]",
    "s" : "[gc]",
    "w" : "[at]",
    "h" : "[act]", # threes
    "v" : "[acg]",
    "d" : "[agt]",
    "b" : "[cgt]",
    "n" : "[acgt]" # four
    }
class motif:
"""
**Purpose**
a motif for word-searching in DNA elements
**Arguments**
name
name of the motif
sequence
the sequence in a degenerate DNA alphabet
You can make an empty element with::
e = motif("empty_name")
Here are two examples::
e = motif("Sox2", "ywttswn")
e = motif("Oct4", "atgywnww")
See pwm and pwms part of glbase for position weight matrices
"""
def __init__(self, name=None, sequence="", **kargs):
    """Build a motif.

    name -- label for the motif (required).
    sequence -- motif in the degenerate (IUPAC) DNA alphabet (required).
    scramble -- optional keyword; when truthy, _scrambleMotif() is invoked.
        NOTE(review): _scrambleMotif() returns scrambled regexes and does
        not mutate self -- confirm the intended effect here.
    """
    assert sequence, "no specified sequence for the motif"
    assert name, "you must give your motif a name"
    self.name = name
    self.draw = draw(self)
    self.seq = sequence
    self.palindromic = utils.isPalindromic(sequence)
    if kargs.get("scramble"):
        self._scrambleMotif()
def isPalindromic(self):
return(self.palindromic)
def isPalindrome(self):
return(self.palindromic)
def getRegEx(self):
    """Compile the regex equivalent of the degenerate sequence.

    The compiled pattern is cached on self.re and returned.
    """
    pattern = "".join([regex_dict[base] for base in self.seq.lower()])
    self.re = re.compile(pattern)
    return self.re
def _scrambleMotif(self, number=10):
"""
(Internal)
generate scrambled motifs.
Be careful with this, it does not work the same as in fexcom.
It does not scramble the motif in-place, instead it returns a set of
'regexd' motifs.
"""
res = []
for _ in range(number):
newSeq = oldSeq = self.seq
while newSeq == oldSeq: # stops the new motif == old motif.
q = [item for item in self.seq]
new = []
while q:
i = random.randint(0, len(q)-1)
new.append(q[i])
q.remove(q[i])
newSeq = "".join(new)
res.append(re.compile(newSeq))
return(res)
def __str__(self):
return("motif name:%s sequence:%s" % (self.name, self.seq))
def __len__(self):
return(len(self.seq))
def scan_sequence(self, seq=None, both_strands=True, mismatches=0):
"""
**Purpose**
scan the word across a single sequence of DNA
**Arguments**
seq (Required)
the DNA sequence (as a string) to be scanned
both_strands (Optional, default=True)
search both strands of the sequence?
mismatches (Optional, default=0)
number of base pair mismatches to tolerate.
**Returns**
A dictionary, containing the keys:
num_motifs_found: The number of motifs discovered
locations: The locations (relative to the 5' end of the seq)
strands: List of strands the motif comes from
sequences: A list of sequences discovered
"""
assert seq, "You must provide a sequence"
if both_strands:
seqs = {"+": seq.lower(), "-": utils.rc(seq.lower())}
else:
seqs = {"+": seq.lower()}
if mismatches == 0:
return(self._scan_no_mismatch(seqs))
else:
return(self._scan_seqs_mismatches(seqs, mismatches))
    def _scan_no_mismatch(self, seqs=None):
        '''
        Search path for no mismatches.

        seqs maps strand label ("+"/"-") to the lower-cased sequence to
        search; the "-" entry is expected to be the reverse complement
        of the "+" entry (see scan_sequence()).

        Returns the result dict described in scan_sequence().
        This could be done a lot faster I think...
        '''
        seq_len = len(seqs['+'])
        local_pos = []    # 5'-most start positions, in + strand coordinates
        found_seqs = []   # matched subsequences (as found on their strand)
        strands = []
        num_motifs = 0
        motif = self.getRegEx()
        for strand in seqs:
            i = motif.finditer(seqs[strand])
            for m in i:
                if strand == "+":
                    local_pos.append(m.start())
                    found_seqs.append(seqs['+'][m.start():m.start()+len(self)])
                    strands.append(strand)
                elif strand == "-":
                    # Map the match on the reverse complement back to a
                    # coordinate on the + strand (5'-most base of the hit).
                    # NOTE(review): abs() looks defensive -- seq_len should
                    # always be >= m.end(); confirm.
                    st = abs(seq_len - m.end())
                    local_pos.append(st) # convert to get the 5' most
                    # Report the - strand sequence (rc of the + strand slice).
                    found_seqs.append(utils.rc(seqs['+'][st:st+len(self)]))
                    strands.append(strand)
                num_motifs += 1
        return({"num_motifs_found": num_motifs, "locations": local_pos, "sequences": found_seqs, "strands": strands})
def _getMismatchRegEx(self, mismatches):
"""
return a list of regexs that take into account the
number of degenerate base pairs.
motif should be a degenerate acgtykrmn type sequence string.
"""
rel = [regex_dict[bp] for bp in self.seq.lower()] # make the base regex to modify below.
seq = self.seq.lower()
rels = []
if mismatches:
pos = 0
while pos < len(seq):
if seq[pos] == "n":
pos += 1 # already a degenerate bp.
else:
old = rel[pos]
rel[pos] = regex_dict["n"]
rels.append(re.compile("".join(rel)))
#print "".join(rel)
rel[pos] = old # replace the old bp.
pos += 1 # move on a bp.
self.re = rels
return(rels)
def _scan_seqs_mismatches(self, seqs, mis_matches=1):
'''
Search path for mismatch searching
At the moment this only reports the number of matched motifs
'''
assert mis_matches < 2, "more than 1 bp degenerate in the back search is not supported. Forcing 1 bp mismatch"
assert mis_matches > 0, 'At least 1 bp mismatch required'
rels = self._getMismatchRegEx(mis_matches)
temp_finds = []
for regex in rels:
for strand in seqs:
found = regex.finditer(seqs[strand])
if found:
for match in found:
# Positions are currently wrong as they are strand relative?
temp_finds.append({"seq": seqs[strand][match.start():match.end()], "pos": "%s:%s:%s" % (strand, match.start(), match.end()) })
found_list = []
# prune duplicate locations;
unique = []
for item in temp_finds: # keep only the first element.
if item["pos"] not in unique:
found_list.append(item["seq"])
unique.append(item["pos"])
return({"num_motifs_found": len(unique), "locations": None, "sequences": found_list, "strands": None})
def scan_sequences(self, seq_data=None, both_strands=True, keep_found_only=False):
"""
**Purpose**
Scan over a set of FASTA sequences and add a new key <name of motif> with
the result either "Found" or None
**Arguments**
seq_data (Required)
the DNA sequence, loaded from a FASTA or a genelist containing a "seq"
key
both_strands (Optional, default=True)
search both strands of the sequence?
keep_found_only (Optional, default=False)
If set to True, then items in the genelist that do not contain the motif are
not added to the returned list (returned list contains only DNA sequences
that contain a motif)
**Returns**
A new genelist copy of seq_data with an additional new key named after the motifs name
and indicating if the motif was found or not
"""
assert seq_data, "You must provide sequence data"
newl = []
for item in seq_data:
res = self.scan_sequence(item["seq"], both_strands=both_strands)
if res["num_motifs_found"]:
newi = copy.deepcopy(item)
item[self.name] = "Found"
item["%s_seqs" % self.name] = ", ".join(res["sequences"])
newl.append(item)
elif not keep_found_only:
newi = copy.deepcopy(item)
item[self.name] = "None"
item["%s_seqs" % self.name] = "None"
newl.append(item)
newgl = seq_data.shallowcopy()
newgl.load_list(newl)
return(newgl)
def count_sequences_with_motif(self, seq_data=None, both_strands=True, **kargs):
"""
**Purpose**
Return some statistics about the numbers and percent of motifs found in each list.
Returns some information in a dict:
{"num_motifs": <The total number of motifs found>,
"with_motif": <The number of items in genelist with 1 or more motif>.
"percent": <The percent of the genelist/fasta with 1 or more motif>}
**Arguments**
seq_data (Required)
the DNA sequence, loaded from a FASTA or a genelist containing a "seq"
key
both_strands (Optional, default=True)
search both strands of the sequence?
"""
assert seq_data, "You must provide sequence data"
results = {"num_motifs": 0, "with_motif": 0}
newl = []
for item in seq_data:
res = self.scan_sequence(item["seq"], both_strands=both_strands)
if res["num_motifs_found"]:
results["num_motifs"] += res["num_motifs_found"]
results["with_motif"] += 1
results["percent"] = (results["with_motif"] / float(len(seq_data))) * 100.0
return(results)
def getMotifMatches(self, genelist=None, both_strands=True, return_num_motifs_only=False):
"""
**Purpose**
scan across the genelist (must include sequence data) with this element and then
return a pileup spanning the length of the fasta, with a score for each time the element
occurs.
The fasta sequence lengths must be the same across all entries in the genelist.
**Arguments**
genelist (Required)
the genelist to scan. Must contain sequence data in "seq", "sequence" or "f".
If you load a FASTA file::
f = genelist(..., format=format.fasta)
then the sequence is already stored in "seq"
both_strands (Optional, default=True)
search both strands by default.
return_num_motifs_only (Optional, default=False)
By default getMotifMatches() returns a numpy array.
If this is set to True it returns a single integer of the number of motifs found
**Returns**
A numpy array going from 0 .. n bp in length. Each location there is a motif the score at that
position will be +1. No motifs are 0.
THe motif score extends along the length of the motif, so for example if your | |
user_cn = AD.GetLDAPObject(cn)
if user_cn is None and auto_create is True:
# Try and create it
if AD.CreateOU(cn) is not True:
AD._errors.append("<b>Error creating faculty OU: </b> " + cn + "<br />")
ret = False
else:
AD._errors.append("<b>Faculty OU created: </b> " + cn + "<br />")
elif user_cn is None and auto_create is not True:
r = "<B>Faculty Container not present and auto create disabled! (" + cn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Faculty OU exists: </b> " + cn + "<br />")
# Check that the groups cn exists
group_cn = AD.GetLDAPObject(AD._ad_faculty_group_cn)
if group_cn is None and auto_create is True:
if AD.CreateOU(AD._ad_faculty_group_cn) is not True:
AD._errors.append("<b>Error creating faculty groups OU: </b> " + AD._ad_faculty_group_cn + "<br />")
ret = False
else:
AD._errors.append("<b>Faculty groups OU created: </b> " + AD._ad_faculty_group_cn + "<br />")
elif group_cn is None and auto_create is not True:
r = "<B>Faculty Groups Container not present and auto create disabled! (" + AD._ad_faculty_group_cn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Faculty groups OU exists: </b> " + AD._ad_faculty_group_cn + "<br />")
# Check that the faculty group exists
faculty_group_dn = AD.GetLDAPObject(AD._ad_faculty_group_dn)
if faculty_group_dn is None and auto_create is True:
if AD.CreateGroup(AD._ad_faculty_group_dn) is not True:
AD._errors.append("<b>Error creating faculty group: </b> " + AD._ad_faculty_group_dn + "<br />")
ret = False
else:
AD._errors.append("<b>Faculty group created: </b> " + AD._ad_faculty_group_dn + "<br />")
elif faculty_group_dn is None and auto_create is not True:
r = "<B>Faculty Group not present and auto create disabled! (" + AD._ad_faculty_group_dn + ")<br/>"
AD._errors.append(r)
ret = False
else:
AD._errors.append("<b>Faculty group exists: </b> " + AD._ad_faculty_group_dn + "<br />")
return ret
@staticmethod
def CreateGroup(group_dn):
ret = False
if AD.Connect() is not True:
return ret
if group_dn == "":
AD._errors.append("<b>Invalid Group CN: </b>" + str(group_dn) + "<br />")
return ret
if AD.GetLDAPObject(group_dn) is not None:
# AD._errors.append("<b>Group already exists: </b> " + str(group_dn) + "<br />")
return True
# Get the sAMAccountName name from the DN
parts = group_dn.split(',')
gname = ""
if len(parts) > 0:
parts = parts[0].split('=')
else:
AD._errors.append("<b>Invalid Group DN! </b>" + str(group_dn) + "<br />")
return ret
if len(parts) == 2:
gname = parts[1]
else:
AD._errors.append("<b>Invalid Group DN! </b>" + str(group_dn) + "<br />")
return ret
if gname == "":
AD._errors.append("<b>Invalid Group DN! </b>" + str(group_dn) + "<br />")
return ret
group_attrs = dict()
group_attrs['objectClass'] = ['top', 'group']
# group_attrs['cn'] = 'Students'
# group_attrs['groupType'] = "0x80000002"
# group_attrs['name'] = 'Students'
# group_attrs['objectCategory'] = 'CN=Group,CN=Schema,CN=Configuration,DC=cbcc,DC=pencollege,DC=net'
group_attrs['sAMAccountName'] = gname # gname.encode(AD._ad_encoding)
group_ldif = Util.GetModList(group_attrs, None)
try:
# AD._ldap.add_s(group_dn.encode(AD._ad_encoding), group_ldif)
# ret = True
#AD._ldap.add(group_dn.encode(AD._ad_encoding), attributes=group_attrs)
AD._ldap.add(group_dn, attributes=group_attrs)
ret = True
except ldap3.core.exceptions.LDAPOperationsErrorResult as error_message:
# except ldap.LDAPError as error_message:
AD._errors.append("<B>Error creating group:</b> " + str(group_dn) + "<br />")
return ret
    @staticmethod
    def AddUserToGroup(user_dn, group_dn):
        """Add user_dn to the members of group_dn.

        Returns True on success (including when the user is already a
        member), False on connection failure or any other LDAP error;
        errors are appended to AD._errors.
        """
        ret = False
        if AD.Connect() is not True:
            return ret
        try:
            # ldap3 extension helper: adds the member and commits.
            ad_add_members_to_groups(AD._ldap,
                                     user_dn,
                                     group_dn)
            ret = True
        except ldap3.core.exceptions.LDAPOperationsErrorResult as error_message:
            # NOTE(review): error_message[0]['desc'] is python-ldap style
            # subscripting; ldap3 exception objects are not subscriptable,
            # so this branch likely raises TypeError instead of detecting
            # "Already exists" -- verify against ldap3's exception API.
            if error_message[0]['desc'] == 'Already exists':
                # Ignore if user is already in the group
                ret = True
                pass
            else:
                AD._errors.append("<B>Error adding user (" + str(user_dn) + ") to group (" + str(group_dn) + ")</b> %s" % error_message)
        return ret
@staticmethod
def GetNameFromLDAPPath(path):
ret = ""
# Get the name for the OU
parts = path.split(',')
ld_name = ""
if len(parts) > 0:
parts = parts[0].split('=')
else:
AD._errors.append("<b>Invalid LDAP Path! </b>" + str(path) + "<br />")
return ret
if len(parts) == 2:
ld_name = parts[1].strip()
else:
AD._errors.append("<b>Invalid LDAP Path! </b>" + str(path) + "<br />")
return ret
if ld_name == "":
AD._errors.append("<b>Invalid LDAP Path! </b>" + str(path) + "<br />")
return ret
ret = ld_name
return ret
@staticmethod
def GetParentLDAPPath(cn, levels=1):
ret = cn
if levels > 0:
levels -= 1
parts = cn.split(',')
ret = ','.join(parts[1:])
if levels > 0:
ret = AD.GetParentLDAPPath(ret, levels)
return ret
@staticmethod
def GetDN(object_name, container_cn):
new_dn = 'cn=' + object_name + "," + container_cn
return new_dn
    @staticmethod
    def CreateOU(ou_dn):
        """Create an organizationalUnit at ou_dn (idempotent).

        Returns True when the OU exists or was created, False on any
        failure; failures are described in AD._errors.
        """
        ret = False
        if AD.Connect() is not True:
            return ret
        if ou_dn == "":
            AD._errors.append("<b>Invalid OU: </b>" + str(ou_dn) + "<br />")
            return ret
        ou = AD.GetLDAPObject(ou_dn)
        if ou is not None:
            # OU already exists -- treated as success.
            # AD._errors.append("<b>OU Already exists:</b> " + str(ou_dn) + "<br />")
            return True
        # NOTE(review): GetNameFromLDAPPath returns "" on a malformed DN;
        # the add below would then be attempted with an empty 'name'.
        ou_name = AD.GetNameFromLDAPPath(ou_dn)
        ou_attrs = dict()
        ou_attrs['name'] = ou_name # ou_name.encode(AD._ad_encoding)
        try:
            # AD._ldap.add_s(ou_dn.encode(AD._ad_encoding), ou_ldif)
            #AD._ldap.add(ou_dn.encode(AD._ad_encoding),
            #             object_class=['top', 'organizationalUnit'],
            #             attributes=ou_attrs,
            #             )
            AD._ldap.add(ou_dn,
                         object_class=['top', 'organizationalUnit'],
                         attributes=ou_attrs,
                         )
            ret = True
        except ldap3.core.exceptions.LDAPOperationsErrorResult as error_message:
            # except ldap.LDAPError as error_message:
            AD._errors.append("<B>Error creating OU:</b> " + str(ou_dn) + "<br />" + str(error_message))
        return ret
@staticmethod
def MakePathCN(cn):
ret = False
log = ""
parts = cn.split(',')
dc = ""
# Get the DC entries
while len(parts)> 0:
item = parts.pop()
if 'dc=' in item.lower():
if dc != "":
dc = "," + dc
dc = item + dc
else:
parts.append(item)
break
# Now make sure each OU exists
last_cn = dc
while len(parts) > 0:
item = parts.pop()
cn = item + "," + last_cn
last_cn = cn
log += " - " + cn
ret = AD.CreateOU(cn)
ret = log
return ret
    @staticmethod
    def CreateUser(user_name, container_cn):
        """Create (or move) an AD user account under container_cn.

        If a user with the target DN already exists elsewhere it is moved
        (modify_dn) into container_cn instead of being recreated. New
        accounts are created disabled (userAccountControl=514) because a
        password has not yet been set.

        Returns True when the user exists at the target DN afterwards,
        False on failure; errors are appended to AD._errors.
        """
        ret = False
        # NOTE(review): other methods in this class call AD.Connect();
        # confirm ConnectAD is the intended entry point here.
        if AD.ConnectAD() is not True:
            return ret
        # Make sure the container exists
        # NOTE(review): verify MakePathCN actually returns a strict boolean
        # so this `is False` check can fire.
        if AD.MakePathCN(container_cn) is False:
            AD._errors.append("<b>Error making OU path: </b>" + str(container_cn))
            return ret
        # TODO - Detect user in different OU and move it?
        new_user_dn = AD.GetDN(user_name, container_cn)
        user = AD.GetLDAPObject(new_user_dn, True)
        if user is not None:
            # User already exists
            # Do we need to move the user?
            # curr_dn = user[0][1]['distinguishedName'][0]
            curr_dn = user[0]['attributes']['distinguishedName']
            if curr_dn.lower() != new_user_dn.lower():
                # Rename to move
                try:
                    # AD._ldap.rename_s(curr_dn.encode(AD._ad_encoding),
                    #     "cn=" + user_name.encode(AD._ad_encoding), container_cn.encode(AD._ad_encoding))
                    # safe_rdn extracts an escaped RDN suitable for modify_dn.
                    safe_dn = safe_rdn(curr_dn)
                    # print("Moving user " + str(curr_dn) + " / " + str(safe_dn) + " / " + str(container_cn))
                    curr_dn = str(curr_dn)
                    r = AD._ldap.modify_dn(curr_dn,
                                           safe_dn,
                                           new_superior=container_cn)
                    # print("RET " + str(r))
                except ldap3.core.exceptions.LDAPOperationsErrorResult as error_message:
                    # except ldap.LDAPError as error_message:
                    AD._errors.append("<b>Error moving user to new OU</b> " + str(user_name) + str(error_message))
                    return False
                except Exception as error_message:
                    AD._errors.append("<b>UNKNOWN ERROR moving user to new OU</b> " + str(user_name) + str(error_message))
                    return False
            return True
        user_attrs = dict()
        user_attrs['objectClass'] = ['top', 'person', 'organizationalPerson', 'user']
        #user_attrs['sAMAccountName'] = user_name.encode(AD._ad_encoding)
        user_attrs['sAMAccountName'] = user_name
        uac = 514 # normal user account
        # Can't use other options until a password is set
        # uac = 0x10200 # normal account, enabled, don't expire password
        #user_attrs['userAccountControl'] = str(uac).encode(AD._ad_encoding)
        user_attrs['userAccountControl'] = str(uac)
        # user_ldif = Util.GetModList(user_attrs, None)
        try:
            # AD._ldap.add_s(new_user_dn.encode(AD._ad_encoding), user_ldif)
            #AD._ldap.add(new_user_dn.encode(AD._ad_encoding),
            #             attributes=user_attrs)
            AD._ldap.add(new_user_dn,
                         attributes=user_attrs)
        except ldap3.core.exceptions.LDAPExceptionError as error_message:
            # except ldap.LDAPError as error_message:
            AD._errors.append("<b>Add User Error:</b> " + str(user_name) + " %s" % error_message)
        # Verify the account is actually present before reporting success.
        if AD.GetLDAPObject(new_user_dn) is not None:
            # User is created and present
            ret = True
        return ret
@staticmethod
def UpdateUserInfo(user_dn, email_address="", first_name="", last_name="", display_name="", description="",
id_number="", home_drive_letter="", home_directory="", login_script="", profile_path="",
ts_allow_login='FALSE'):
ret = True
if AD.ConnectAD() is not True:
return False
if AD.GetLDAPObject(user_dn) is None:
# User doesn't exist
AD._errors.append("<B>Error changing user that doesn't exist (" + str(user_dn) + ")")
return False
u_attrs = dict()
#u_attrs['userPrincipalName'] = [(ldap3.MODIFY_REPLACE, [email_address.encode(AD._ad_encoding)])]
#u_attrs['givenName'] = [(ldap3.MODIFY_REPLACE, [first_name.encode(AD._ad_encoding)])]
#u_attrs['sn'] = [(ldap3.MODIFY_REPLACE, [last_name.encode(AD._ad_encoding)])]
#u_attrs['displayName'] = [(ldap3.MODIFY_REPLACE, [display_name.encode(AD._ad_encoding)])]
#u_attrs['description'] = [(ldap3.MODIFY_REPLACE, [description.encode(AD._ad_encoding)])]
#u_attrs['mail'] = [(ldap3.MODIFY_REPLACE, [email_address.encode(AD._ad_encoding)])]
#u_attrs['employeeID'] = [(ldap3.MODIFY_REPLACE, [id_number.encode(AD._ad_encoding)])]
#u_attrs['msTSAllowLogon'] = [(ldap3.MODIFY_REPLACE, [ts_allow_login])]
u_attrs['userPrincipalName'] = [(ldap3.MODIFY_REPLACE, [email_address])]
u_attrs['givenName'] = [(ldap3.MODIFY_REPLACE, [first_name])]
u_attrs['sn'] = [(ldap3.MODIFY_REPLACE, [last_name])]
u_attrs['displayName'] = [(ldap3.MODIFY_REPLACE, [display_name])]
u_attrs['description'] = [(ldap3.MODIFY_REPLACE, [description])]
u_attrs['mail'] = [(ldap3.MODIFY_REPLACE, [email_address])]
u_attrs['employeeID'] = [(ldap3.MODIFY_REPLACE, [id_number])]
u_attrs['msTSAllowLogon'] = [(ldap3.MODIFY_REPLACE, [ts_allow_login])]
# TODO - Figure out how to duplicate userParameters so we can have better control of Terminal Services settings
# Just setting this value doesn't seem to be working - need different encoding?
#up = u" PCtxCfgPresent㔵攱戰ぢCtxCfgFlags1〰てㄲ〹CtxShadow㈰〰〰〰*CtxMinEncryptionLevel".decode("utf-8")
up = u" PCtxCfgPresent㔵攱戰ぢCtxCfgFlags1〰てㄲ〹CtxShadow㈰〰〰〰*CtxMinEncryptionLevel"
u_attrs['userParameters'] = [(ldap3.MODIFY_REPLACE, [up])]
# u_ldif = Util.GetModList(u_attrs, ldap3.MODIFY_REPLACE)
# Update Base info
try:
# AD._ldap.modify_s(user_dn.encode(AD._ad_encoding), u_ldif)
#AD._ldap.modify(user_dn.encode(AD._ad_encoding),
# u_attrs)
AD._ldap.modify(user_dn,
u_attrs)
except ldap3.core.exceptions.LDAPExceptionError as error_message:
# except ldap.LDAPError as error_message:
AD._errors.append("<b>Error updating user: " + str(user_dn) + " | |
# oslo_db/sqlalchemy/exc_filters.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Define exception redefinitions for SQLAlchemy DBAPI exceptions."""
import collections
import logging
import re
import sys
from sqlalchemy import event
from sqlalchemy import exc as sqla_exc
from oslo_db import exception
LOG = logging.getLogger(__name__)
# Registry of exception filters, populated by the @filters decorator below:
# dbname -> exception type -> list of (handler, compiled_regex) pairs.
_registry = collections.defaultdict(
    lambda: collections.defaultdict(
        list
    )
)
def filters(dbname, exception_type, regex):
    """Mark a function as receiving a filtered exception.

    :param dbname: string database name, e.g. 'mysql'
    :param exception_type: a SQLAlchemy database exception class, which
     extends from :class:`sqlalchemy.exc.DBAPIError`.
    :param regex: a string, or a tuple of strings, that will be processed
     as matching regular expressions.
    """
    # Normalise to a tuple so a single pattern and a tuple of patterns
    # are handled uniformly.
    patterns = regex if isinstance(regex, tuple) else (regex,)

    def _receive(fn):
        entries = [(fn, re.compile(pattern)) for pattern in patterns]
        _registry[dbname][exception_type].extend(entries)
        return fn

    return _receive
# NOTE(zzzeek) - for Postgresql, catch both OperationalError, as the
# actual error is
# psycopg2.extensions.TransactionRollbackError(OperationalError),
# as well as sqlalchemy.exc.DBAPIError, as SQLAlchemy will reraise it
# as this until issue #3075 is fixed.
@filters("mysql", sqla_exc.OperationalError, (r"^.*\b1213\b.*Deadlock found.*", r"^.*\b1213\b.*Deadlock: wsrep.*"))
@filters("mysql", sqla_exc.DatabaseError,
         r"^.*\b1205\b.*Lock wait timeout exceeded.*")
@filters("mysql", sqla_exc.InternalError, r"^.*\b1213\b.*Deadlock found.*")
@filters("mysql", sqla_exc.InternalError,
         r"^.*\b1213\b.*detected deadlock/conflict.*")
@filters("cockroachdb", sqla_exc.OperationalError, (r"^.*retry txn.*",
                                                    r"^.*restart transaction.*"))
@filters("postgresql", sqla_exc.OperationalError, r"^.*deadlock detected.*")
@filters("postgresql", sqla_exc.DBAPIError, r"^.*deadlock detected.*")
@filters("ibm_db_sa", sqla_exc.DBAPIError, r"^.*SQL0911N.*")
def _deadlock_error(operational_error, match, engine_name, is_disconnect):
    """Filter for MySQL or Postgresql deadlock error.

    Re-raises any matched backend deadlock as oslo.db's DBDeadlock.

    NOTE(comstud): In current versions of DB backends, Deadlock violation
    messages follow the structure:

    mysql+mysqldb:
    (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
                        'restarting transaction') <query_str> <query_args>

    mysql+mysqlconnector:
    (InternalError) 1213 (40001): Deadlock found when trying to get lock; try
    restarting transaction

    postgresql:
    (TransactionRollbackError) deadlock detected <deadlock_details>

    ibm_db_sa:
    SQL0911N The current transaction has been rolled back because of a
    deadlock or timeout <deadlock details>
    """
    raise exception.DBDeadlock(operational_error)
@filters("mysql", sqla_exc.IntegrityError,
         r"^.*\b1062\b.*Duplicate entry '(?P<value>.*)'"
         r" for key '(?P<columns>[^']+)'.*$")
# NOTE(jd) For binary types
@filters("mysql", sqla_exc.IntegrityError,
         r"^.*\b1062\b.*Duplicate entry \\'(?P<value>.*)\\'"
         r" for key \\'(?P<columns>.+)\\'.*$")
# NOTE(pkholkin): the first regex is suitable only for PostgreSQL 9.x versions
# the second regex is suitable for PostgreSQL 8.x versions
@filters("postgresql", sqla_exc.IntegrityError,
         (r'^.*duplicate\s+key.*"(?P<columns>[^"]+)"\s*\n.*'
          r'Key\s+\((?P<key>.*)\)=\((?P<value>.*)\)\s+already\s+exists.*$',
          r"^.*duplicate\s+key.*\"(?P<columns>[^\"]+)\"\s*\n.*$"))
@filters("cockroachdb", sqla_exc.IntegrityError, r'^.*duplicate\s+key.*"(?P<columns>[^"]+)"\s*\n.*')
def _default_dupe_key_error(integrity_error, match, engine_name,
                            is_disconnect):
    """Filter for MySQL or Postgresql duplicate key error.

    note(boris-42): In current versions of DB backends unique constraint
    violation messages follow the structure:

    postgres:
    1 column - (IntegrityError) duplicate key value violates unique
               constraint "users_c1_key"
    N columns - (IntegrityError) duplicate key value violates unique
               constraint "name_of_our_constraint"

    mysql+mysqldb:
    1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
               'c1'")
    N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
               with -' for key 'name_of_our_constraint'")

    mysql+mysqlconnector:
    1 column - (IntegrityError) 1062 (23000): Duplicate entry 'value_of_c1' for
               key 'c1'
    N columns - (IntegrityError) 1062 (23000): Duplicate entry 'values
               joined with -' for key 'name_of_our_constraint'
    """
    columns = match.group('columns')
    # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
    #                  where `t` it is table name and columns `c1`, `c2`
    #                  are in UniqueConstraint.
    uniqbase = "uniq_"
    if columns.startswith(uniqbase):
        columns = columns[len(uniqbase):].split("0")[1:]
    elif engine_name == "postgresql":
        # e.g. "users_c1_key" -> "c1"
        columns = [columns[columns.index("_") + 1:columns.rindex("_")]]
    else:
        columns = [columns]
    value = match.groupdict().get('value')
    raise exception.DBDuplicateEntry(columns, integrity_error, value)
@filters("sqlite", sqla_exc.IntegrityError,
         (r"^.*columns?(?P<columns>[^)]+)(is|are)\s+not\s+unique$",
          r"^.*UNIQUE\s+constraint\s+failed:\s+(?P<columns>.+)$",
          r"^.*PRIMARY\s+KEY\s+must\s+be\s+unique.*$"))
def _sqlite_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
    """Filter for SQLite duplicate key error.

    note(boris-42): In current versions of DB backends unique constraint
    violation messages follow the structure:

    sqlite:
    1 column - (IntegrityError) column c1 is not unique
    N columns - (IntegrityError) column c1, c2, ..., N are not unique

    sqlite since 3.7.16:
    1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
    N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2

    sqlite since 3.8.2:
    (IntegrityError) PRIMARY KEY must be unique
    """
    # NOTE(ochuprykov): The last pattern above has no named groups, so
    # 'columns' is simply absent from groupdict(); fall back to an empty
    # column list in that case. This replaces the previous
    # try/except IndexError around match.group('columns').
    raw = match.groupdict().get('columns')
    if raw is None:
        columns = []
    else:
        columns = [c.split('.')[-1] for c in raw.strip().split(", ")]
    raise exception.DBDuplicateEntry(columns, integrity_error)
@filters("sqlite", sqla_exc.IntegrityError,
         r"(?i).*foreign key constraint failed")
@filters("postgresql", sqla_exc.IntegrityError,
         r".*on table \"(?P<table>[^\"]+)\" violates "
         "foreign key constraint \"(?P<constraint>[^\"]+)\".*\n"
         "DETAIL:  Key \((?P<key>.+)\)=\(.+\) "
         "is (not present in|still referenced from) table "
         "\"(?P<key_table>[^\"]+)\".")
@filters("mysql", sqla_exc.IntegrityError,
         r".*Cannot (add|delete) or update a (child|parent) row: "
         'a foreign key constraint fails \([`"].+[`"]\.[`"](?P<table>.+)[`"], '
         'CONSTRAINT [`"](?P<constraint>.+)[`"] FOREIGN KEY '
         '\([`"](?P<key>.+)[`"]\) REFERENCES [`"](?P<key_table>.+)[`"] ')
def _foreign_key_error(integrity_error, match, engine_name, is_disconnect):
    """Filter for foreign key errors.

    Groups not defined by a particular backend's pattern (e.g. sqlite's)
    come through as None.
    """
    # Match.groupdict().get() yields None both for groups the pattern does
    # not define and for groups that did not participate in the match --
    # same result as the previous four try/except IndexError blocks.
    groups = match.groupdict()
    raise exception.DBReferenceError(groups.get("table"),
                                     groups.get("constraint"),
                                     groups.get("key"),
                                     groups.get("key_table"),
                                     integrity_error)
@filters("postgresql", sqla_exc.IntegrityError,
         r".*new row for relation \"(?P<table>.+)\" "
         "violates check constraint "
         "\"(?P<check_name>.+)\"")
def _check_constraint_error(
        integrity_error, match, engine_name, is_disconnect):
    """Filter for check constraint errors."""
    # groupdict().get() returns None for missing/non-participating groups,
    # replacing the previous try/except IndexError boilerplate.
    groups = match.groupdict()
    raise exception.DBConstraintError(groups.get("table"),
                                      groups.get("check_name"),
                                      integrity_error)
@filters("postgresql", sqla_exc.ProgrammingError,
         r".* constraint \"(?P<constraint>.+)\" "
         "of relation "
         "\"(?P<relation>.+)\" does not exist")
@filters("mysql", sqla_exc.InternalError,
         r".*1091,.*Can't DROP '(?P<constraint>.+)'; "
         "check that column/key exists")
@filters("mysql", sqla_exc.OperationalError,
         r".*1091,.*Can't DROP '(?P<constraint>.+)'; "
         "check that column/key exists")
@filters("mysql", sqla_exc.InternalError,
         r".*1025,.*Error on rename of '.+/(?P<relation>.+)' to ")
def _check_constraint_non_existing(
        programming_error, match, engine_name, is_disconnect):
    """Filter for constraint non existing errors.

    Either the relation or the constraint group may be absent depending on
    which backend pattern matched; those come through as None.
    """
    # groupdict().get() returns None for missing/non-participating groups,
    # replacing the previous try/except IndexError boilerplate.
    groups = match.groupdict()
    raise exception.DBNonExistentConstraint(groups.get("relation"),
                                            groups.get("constraint"),
                                            programming_error)
@filters("sqlite", sqla_exc.OperationalError,
         r".* no such table: (?P<table>.+)")
@filters("mysql", sqla_exc.InternalError,
         r".*1051,.*Unknown table '(.+\.)?(?P<table>.+)'\"")
@filters("mysql", sqla_exc.OperationalError,
         r".*1051,.*Unknown table '(.+\.)?(?P<table>.+)'\"")
@filters("postgresql", sqla_exc.ProgrammingError,
         r".* table \"(?P<table>.+)\" does not exist")
def _check_table_non_existing(
        programming_error, match, engine_name, is_disconnect):
    """Filter for table non existing errors.

    All registered patterns define the 'table' group, so it can be read
    unconditionally here.
    """
    raise exception.DBNonExistentTable(match.group("table"), programming_error)
@filters("mysql", sqla_exc.InternalError,
         r".*1049,.*Unknown database '(?P<database>.+)'\"")
@filters("mysql", sqla_exc.OperationalError,
         r".*1049,.*Unknown database '(?P<database>.+)'\"")
@filters("postgresql", sqla_exc.OperationalError,
         r".*database \"(?P<database>.+)\" does not exist")
@filters("sqlite", sqla_exc.OperationalError,
         ".*unable to open database file.*")
def _check_database_non_existing(
        error, match, engine_name, is_disconnect):
    """Filter for non-existent database errors.

    The sqlite pattern has no 'database' group; None is passed in that
    case.
    """
    # groupdict().get() returns None when the pattern defines no
    # 'database' group, replacing the previous try/except IndexError.
    raise exception.DBNonExistentDatabase(
        match.groupdict().get("database"), error)
@filters("ibm_db_sa", sqla_exc.IntegrityError, r"^.*SQL0803N.*$")
def _db2_dupe_key_error(integrity_error, match, engine_name, is_disconnect):
    """Filter for DB2 duplicate key errors.

    N columns - (IntegrityError) SQL0803N  One or more values in the INSERT
                statement, UPDATE statement, or foreign key update caused by a
                DELETE statement are not valid because the primary key, unique
                constraint or unique index identified by "2" constrains table
                "NOVA.KEY_PAIRS" from having duplicate values for the index
                key.
    """
    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    raise exception.DBDuplicateEntry([], integrity_error)
@filters("mysql", sqla_exc.DBAPIError, r".*\b1146\b")
def _raise_mysql_table_doesnt_exist_asis(
        error, match, engine_name, is_disconnect):
    """Raise MySQL error 1146 as is.

    Raise MySQL error 1146 (table doesn't exist) unwrapped, so that it does
    not conflict with the MySQL dialect's checking a table not existing.
    """
    raise error
@filters("mysql", sqla_exc.OperationalError,
         r".*(1292|1366).*Incorrect \w+ value.*")
@filters("mysql", sqla_exc.DataError,
         r".*1265.*Data truncated for column.*")
@filters("mysql", sqla_exc.DataError,
         r".*1264.*Out of range value for column.*")
@filters("mysql", sqla_exc.InternalError,
         r"^.*1366.*Incorrect string value:*")
@filters("sqlite", sqla_exc.ProgrammingError,
         r"(?i).*You must not use 8-bit bytestrings*")
@filters("mysql", sqla_exc.DataError,
         r".*1406.*Data too long for column.*")
def _raise_data_error(error, match, engine_name, is_disconnect):
    """Raise DBDataError exception for different data errors.

    Covers truncation, out-of-range, bad-encoding and too-long values
    from the patterns registered above.
    """
    raise exception.DBDataError(error)
@filters("mysql", sqla_exc.OperationalError,
         r".*\(1305,\s+\'SAVEPOINT\s+(.+)\s+does not exist\'\)")
def _raise_savepoints_as_dberrors(error, match, engine_name, is_disconnect):
    """Wrap MySQL 'SAVEPOINT ... does not exist' into DBError."""
    # NOTE(rpodolyaka): this is a special case of an OperationalError that used
    # to be an InternalError. It's expected to be wrapped into oslo.db error.
    raise exception.DBError(error)
@filters("*", sqla_exc.OperationalError, r".*")
def _raise_operational_errors_directly_filter(operational_error,
                                              match, engine_name,
                                              is_disconnect):
    """Filter for all remaining OperationalError classes and apply.

    Filter for all remaining OperationalError classes and apply
    special rules: disconnects are wrapped, everything else is re-raised
    unchanged.
    """
    if is_disconnect:
        # operational errors that represent disconnect
        # should be wrapped
        raise exception.DBConnectionError(operational_error)
    else:
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise operational_error
@filters("mysql", sqla_exc.OperationalError, r".*\(.*(?:2002|2003|2006|2013|1047)")  # noqa
@filters("mysql", sqla_exc.InternalError, r".*\(.*(?:1927)")  # noqa
@filters("mysql", sqla_exc.InternalError, r".*Packet sequence number wrong")  # noqa
@filters("postgresql", sqla_exc.OperationalError, r".*could not connect to server")  # noqa
@filters("ibm_db_sa", sqla_exc.OperationalError, r".*(?:30081)")
def _is_db_connection_error(operational_error, match, engine_name,
                            is_disconnect):
    """Detect the exception as indicating a recoverable error on connect.

    The matched backend error codes/messages all indicate a lost or failed
    connection; wrap them as DBConnectionError so callers may retry.
    """
    raise exception.DBConnectionError(operational_error)
@filters("*", sqla_exc.NotSupportedError, r".*")
def _raise_for_NotSupportedError(error, match, engine_name, is_disconnect):
    """Wrap any backend NotSupportedError into DBNotSupportedError."""
    raise exception.DBNotSupportedError(error)
@filters("*", sqla_exc.DBAPIError, r".*")
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
"""Filter for remaining DBAPIErrors.
Filter for remaining DBAPIErrors and wrap | |
def viewing_condition_dependent_parameters(Y_b, Y_w, L_A):
    """
    Returns the viewing condition dependent parameters.

    (Fix: the ``def`` keyword was missing from the signature line, leaving
    this as a module-level syntax error -- likely lost in a merge.)

    Parameters
    ----------
    Y_b : numeric
        Adapting field *Y* tristimulus value :math:`Y_b`.
    Y_w : numeric
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    L_A : numeric
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.

    Returns
    -------
    tuple
        Viewing condition dependent parameters
        (n, F_L, N_bb, N_cb, z).

    Examples
    --------
    >>> viewing_condition_dependent_parameters(20.0, 100.0, 318.31)  # noqa  # doctest: +ELLIPSIS
    (0.2000000..., 1.1675444..., 1.0003040..., 1.0003040..., 1.9272135...)
    """
    # Background induction factor and the derived adaptation parameters.
    n = Y_b / Y_w
    F_L = luminance_level_adaptation_factor(L_A)
    N_bb, N_cb = chromatic_induction_factors(n)
    z = base_exponential_non_linearity(n)
    return n, F_L, N_bb, N_cb, z
def degree_of_adaptation(F, L_A):
    """
    Returns the degree of adaptation :math:`D` from given surround maximum
    degree of adaptation :math:`F` and Adapting field *luminance* :math:`L_A`
    in :math:`cd/m^2`.

    Parameters
    ----------
    F : numeric
        Surround maximum degree of adaptation :math:`F`.
    L_A : numeric
        Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.

    Returns
    -------
    numeric
        Degree of adaptation :math:`D`.

    Examples
    --------
    >>> degree_of_adaptation(1.0, 318.31)  # doctest: +ELLIPSIS
    0.9944687...
    """

    # Exponential decay of adaptation with adapting luminance.
    decay = np.exp((-L_A - 42) / 92)
    return F * (1 - (1 / 3.6) * decay)
def full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D):
    """
    Applies full chromatic adaptation to given *CMCCAT2000* transform sharpened
    *RGB* matrix using given *CMCCAT2000* transform sharpened whitepoint
    *RGB_w* matrix.

    Parameters
    ----------
    RGB : array_like
        *CMCCAT2000* transform sharpened *RGB* matrix.
    RGB_w : array_like
        *CMCCAT2000* transform sharpened whitepoint *RGB_w* matrix.
    Y_w : numeric
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    D : numeric
        Degree of adaptation :math:`D`.

    Returns
    -------
    ndarray, (3,)
        Adapted *RGB* matrix.

    Examples
    --------
    >>> RGB = np.array([18.985456, 20.707422, 21.747482])
    >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])
    >>> Y_w = 100.0
    >>> D = 0.994468780088
    >>> full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)  # noqa # doctest: +ELLIPSIS
    array([ 19.9937078...,  20.0039363...,  20.0132638...])
    """

    channels = np.ravel(RGB)
    white = np.ravel(RGB_w)
    # Per-channel adaptation gain blending full adaptation (D = 1) with no
    # adaptation (D = 0).
    gains = (Y_w * D / white) + 1 - D
    return gains * channels
def full_chromatic_adaptation_reverse(RGB, RGB_w, Y_w, D):
    """
    Reverts full chromatic adaptation of given *CMCCAT2000* transform sharpened
    *RGB* matrix using given *CMCCAT2000* transform sharpened whitepoint
    *RGB_w* matrix.

    Parameters
    ----------
    RGB : array_like
        *CMCCAT2000* transform sharpened *RGB* matrix.
    RGB_w : array_like
        *CMCCAT2000* transform sharpened whitepoint *RGB_w* matrix.
    Y_w : numeric
        Whitepoint *Y* tristimulus value :math:`Y_w`.
    D : numeric
        Degree of adaptation :math:`D`.

    Returns
    -------
    ndarray, (3,)
        Adapted *RGB* matrix.

    Examples
    --------
    >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])
    >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])
    >>> Y_w = 100.0
    >>> D = 0.994468780088
    >>> full_chromatic_adaptation_reverse(RGB, RGB_w, Y_w, D)
    array([ 18.985456,  20.707422,  21.747482])
    """

    channels = np.ravel(RGB)
    white = np.ravel(RGB_w)
    # Divide out the per-channel adaptation gain applied by the forward
    # transform.
    gains = Y_w * (D / white) + 1 - D
    return channels / gains
def RGB_to_rgb(RGB):
    """
    Converts given *RGB* matrix to *Hunt-Pointer-Estevez*
    :math:`\\rho\\gamma\\beta` colourspace.

    Parameters
    ----------
    RGB : array_like, (3,)
        *RGB* matrix.

    Returns
    -------
    ndarray, (3,)
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace matrix.

    Examples
    --------
    >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])
    >>> RGB_to_rgb(RGB)  # doctest: +ELLIPSIS
    array([ 19.9969397...,  20.0018612...,  20.0135053...])
    """

    # Undo the CAT02 sharpening, then apply the Hunt-Pointer-Estevez
    # transformation, in a single combined matrix product.
    transform = np.dot(XYZ_TO_HPE_MATRIX, CAT02_INVERSE_CAT)
    return np.dot(transform, RGB)
def rgb_to_RGB(rgb):
    """
    Converts given *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace
    matrix to *RGB* matrix.

    Parameters
    ----------
    rgb : array_like, (3,)
        *Hunt-Pointer-Estevez* :math:`\\rho\\gamma\\beta` colourspace matrix.

    Returns
    -------
    ndarray, (3,)
        *RGB* matrix.

    Examples
    --------
    >>> rgb = np.array([19.99693975, 20.00186123, 20.0135053])
    >>> rgb_to_RGB(rgb)  # doctest: +ELLIPSIS
    array([ 19.9937078...,  20.0039363...,  20.0132638...])
    """

    # Inverse of RGB_to_rgb: leave Hunt-Pointer-Estevez space, then re-apply
    # the CAT02 sharpening, as one combined matrix product.
    transform = np.dot(CAT02_CAT, HPE_TO_XYZ_MATRIX)
    return np.dot(transform, rgb)
def post_adaptation_non_linear_response_compression_forward(RGB, F_L):
    """
    Returns given *CMCCAT2000* transform sharpened *RGB* matrix with post
    adaptation non linear response compression.

    Parameters
    ----------
    RGB : array_like
        *CMCCAT2000* transform sharpened *RGB* matrix.
    F_L : numeric
        Luminance-level adaptation factor :math:`F_L`.

    Returns
    -------
    ndarray, (3,)
        Compressed *CMCCAT2000* transform sharpened *RGB* matrix.

    Examples
    --------
    >>> RGB = np.array([19.99693975, 20.00186123, 20.0135053])
    >>> F_L = 1.16754446415
    >>> post_adaptation_non_linear_response_compression_forward(RGB, F_L)  # noqa # doctest: +ELLIPSIS
    array([ 7.9463202...,  7.9471152...,  7.9489959...])
    """

    # TODO: Check for negative values and their handling.
    # Hyperbolic (Michaelis-Menten style) compression of the luminance-scaled
    # channel responses.
    scaled = (F_L * RGB / 100) ** 0.42
    return 400 * scaled / (27.13 + scaled) + 0.1
def post_adaptation_non_linear_response_compression_reverse(RGB, F_L):
    """
    Returns given *CMCCAT2000* transform sharpened *RGB* matrix without post
    adaptation non linear response compression.

    Parameters
    ----------
    RGB : array_like
        *CMCCAT2000* transform sharpened *RGB* matrix.
    F_L : numeric
        Luminance-level adaptation factor :math:`F_L`.

    Returns
    -------
    ndarray, (3,)
        Uncompressed *CMCCAT2000* transform sharpened *RGB* matrix.

    Examples
    --------
    >>> RGB = np.array([7.9463202, 7.94711528, 7.94899595])
    >>> F_L = 1.16754446415
    >>> post_adaptation_non_linear_response_compression_reverse(RGB, F_L)  # noqa # doctest: +ELLIPSIS
    array([ 19.9969397...,  20.0018612...,  20.0135052...])
    """

    # Invert the hyperbolic compression; the sign is restored separately so
    # the (1 / 0.42) root is only taken of a non-negative magnitude.
    magnitude = np.abs(RGB - 0.1)
    return (np.sign(RGB - 0.1) *
            (100 / F_L) * ((27.13 * magnitude) /
                           (400 - magnitude)) ** (1 / 0.42))
def opponent_colour_dimensions_forward(RGB):
"""
Returns opponent colour dimensions from given compressed *CMCCAT2000*
transform sharpened *RGB* matrix for forward *CIECAM02* implementation
Parameters
----------
RGB : array_like
Compressed *CMCCAT2000* transform sharpened *RGB* matrix.
Returns
-------
tuple
Opponent colour dimensions.
Examples
--------
>>> RGB = np.array([7.9463202, 7.94711528, 7.94899595])
>>> opponent_colour_dimensions_forward(RGB) # doctest: +ELLIPSIS
(-0.0006241..., -0.0005062...)
"""
R, G, B = np.ravel(RGB)
a = R - 12 * G / 11 + B / 11
b = (R + G - 2 * B) / 9
return a, b
def opponent_colour_dimensions_reverse(P, h):
    """
    Returns opponent colour dimensions from given points :math:`P` and hue
    :math:`h` in degrees for reverse *CIECAM02* implementation.

    Parameters
    ----------
    P : array_like
        Points :math:`P`.
    h : numeric
        Hue :math:`h` in degrees.

    Returns
    -------
    tuple
        Opponent colour dimensions.

    Examples
    --------
    >>> p = (30162.890815335879, 24.237205467134817, 1.05)
    >>> h = -140.9515673417281
    >>> opponent_colour_dimensions_reverse(p, h)  # doctest: +ELLIPSIS
    (-0.0006241..., -0.0005062...)
    """

    P_1, P_2, P_3 = P
    hr = math.radians(h)
    sin_hr = math.sin(hr)
    cos_hr = math.cos(hr)
    # Both auxiliary terms are formed up front (as in the CIECAM02 reverse
    # equations); each branch below then uses the numerically stable one.
    P_4 = P_1 / sin_hr
    P_5 = P_1 / cos_hr
    n = P_2 * (2 + P_3) * (460 / 1403)
    if abs(sin_hr) >= abs(cos_hr):
        # Solve for b first when the sine dominates.
        cot_hr = cos_hr / sin_hr
        b = n / (P_4 + (2 + P_3) * (220 / 1403) * cot_hr - (
            27 / 1403) + P_3 * (6300 / 1403))
        a = b * cot_hr
    else:
        # Solve for a first when the cosine dominates.
        tan_hr = sin_hr / cos_hr
        a = n / (P_5 + (2 + P_3) * (220 / 1403) - (
            (27 / 1403) - P_3 * (6300 / 1403)) * tan_hr)
        b = a * tan_hr
    return a, b
def hue_angle(a, b):
    """
    Returns the *hue* angle :math:`h` in degrees.

    Parameters
    ----------
    a : numeric
        Opponent colour dimension :math:`a`.
    b : numeric
        Opponent colour dimension :math:`b`.

    Returns
    -------
    numeric
        *Hue* angle :math:`h` in degrees.

    Examples
    --------
    >>> a = -0.0006241120682426434
    >>> b = -0.0005062701067729668
    >>> hue_angle(a, b)  # doctest: +ELLIPSIS
    219.0484326...
    """

    # atan2 handles all quadrants; the modulo maps the result into [0, 360).
    angle = np.arctan2(b, a)
    return math.degrees(angle) % 360
def hue_quadrature(h):
    """
    Returns the hue quadrature from given hue :math:`h` angle in degrees.

    Parameters
    ----------
    h : numeric
        Hue :math:`h` angle in degrees.

    Returns
    -------
    numeric
        Hue quadrature.

    Examples
    --------
    >>> hue_quadrature(219.0484326582719) # doctest: +ELLIPSIS
    278.0607358...
    """
    # Tabulated unique-hue data: boundary hue angles h_i, eccentricity
    # factors e_i and quadrature values H_i (module-level constant).
    h_i = HUE_DATA_FOR_HUE_QUADRATURE.get('h_i')
    e_i = HUE_DATA_FOR_HUE_QUADRATURE.get('e_i')
    H_i = HUE_DATA_FOR_HUE_QUADRATURE.get('H_i')
    # Locate the unique-hue interval containing h.
    i = bisect.bisect_left(h_i, h) - 1
    h_ii = h_i[i]
    e_ii = e_i[i]
    H_ii = H_i[i]
    h_ii1 = h_i[i + 1]
    e_ii1 = e_i[i + 1]
    if h < 20.14:
        # Below the first (red) unique hue: wrap around from the blue end.
        # NOTE(review): 385.9 / 14.1 / 0.856 / 0.8 appear to be precomputed
        # wrap-around terms of the CIECAM02 formula -- confirm against the
        # hue data table.
        H = 385.9
        H += (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8)
    elif h >= 237.53:
        # Above the last (blue) unique hue: interpolate towards h = 360.
        H = H_ii
        H += ((85.9 * (h - h_ii) / e_ii) /
              ((h - h_ii) / e_ii + (360 - h) / 0.856))
    else:
        # Interior interval: eccentricity-weighted linear interpolation.
        H = H_ii
        H += ((100 * (h - h_ii) / e_ii) /
              ((h - h_ii) / e_ii + (h_ii1 - h) / e_ii1))
    return H
def eccentricity_factor(h):
    """
    Returns the eccentricity factor :math:`e_t` from given hue :math:`h` angle
    for forward *CIECAM02* implementation.

    Parameters
    ----------
    h : numeric
        Hue :math:`h` angle in degrees.

    Returns
    -------
    numeric
        Eccentricity factor :math:`e_t`.

    Examples
    --------
    >>> eccentricity_factor(-140.951567342)  # doctest: +ELLIPSIS
    1.1740054...
    """

    # Hue converted to radians and offset by 2 per the CIECAM02 formula.
    hr = 2 + h * math.pi / 180
    return 1 / 4 * (math.cos(hr) + 3.8)
def achromatic_response_forward(RGB, N_bb):
"""
Returns the achromatic response :math:`A` from given compressed
*CMCCAT2000* transform sharpened *RGB* matrix and :math:`N_{bb}` chromatic
induction factor for forward *CIECAM02* | |
## Set new state value
def set_state(self, mid, value):
self._STATE[mid].appendleft(value)
return None
# Construct the measurable using id
def construct_measurable(self, mid, definition=None, init_value=None, depth=1, decdep=False):
"""
:param mid: the input id, if it is none, id will be generated
:param definition: the definition function of the measurable
:param init_value: the init value for the measurable
:param depth: the depth of the measurable
:param decdep: value indicating if the measurable is decision dependent
:return: nothing
"""
# check mid first
# construction requires prior registration
if mid not in self._ID:
raise Exception("the mid " + mid + " is not registered!")
# add the mid into the MID list, ID_TO_DEP, and DEFS
self._MID.append(mid)
self._ID_TO_DEP[mid] = bool(decdep)
self._DEFS[mid] = definition
if init_value is None: # will try to generate the init_value based on the definition
self._STATE[mid] = deque([], depth + 1)
# -------------------- Remove the old try/except block, because: -----------------------
# 1 if definition is None, getting this by throwing an exception will reduce performance, an if else check will be faster
# 2 if definition is not None, and there is an error generating values, then the exception will not be caught because those error are unexpected
if definition is not None: # if definition exist, calculate the init value, any error will throw to outer scope
self.set_state(mid, definition(self._STATE))
else:
self._STATE[mid] = deque(init_value, depth + 1)
return None
# construct the agent using id
    def construct_sensor(self, mid, definition=None, init_value=None, depth=1, decdep=False):
        """
        Construct a binary sensor as a pair of complementary measurables:
        *mid* and its complement (per name_comp).

        :param mid: the sensor id; both it and its complement must be registered
        :param definition: the definition function of the measurable; None
               marks an action sensor
        :param init_value: the init value for the measurable
        :param depth: the depth of the measurable
        :param decdep: value indicating if the measurable is decision dependent
        :return: nothing
        :raises Exception: if *mid* or its complement was never registered
        """
        midc = name_comp(mid)
        # check mid/midc first
        if mid not in self._ID or midc not in self._ID:
            raise Exception("the mid $" + mid + "$ is not registered!")
        # compute initial value of sensor
        if definition is None:  # this is an action sensor, init value WILL be defaulted to False...
            self.construct_measurable(mid, None, [False for ind in xrange(depth + 1)], depth, decdep)
            self.construct_measurable(midc, None, [True for ind in xrange(depth + 1)], depth, decdep)
        else:
            # The complement negates both the definition (func_not) and the
            # initial values (negate).
            # NOTE(review): func_not/negate are module-level helpers defined
            # elsewhere in this file; xrange implies Python 2.
            self.construct_measurable(mid, definition, init_value, depth, decdep)
            self.construct_measurable(midc, func_not(definition),
                                      negate(init_value) if init_value is not None else None, depth, decdep)
        return None
    def construct_agent(self, id_agent, id_motivation, definition, decdep, params):
        """
        :param id_agent: the agent id, must provide, cannot be None
        :param id_motivation: id for phi value
        :param definition: definition of the agent
        :param decdep: bool indicating whether the agent is decision dependent
        :param params: other parameters for the agent
        :return: the new Agent on success
        :raises Exception: if *id_agent* was never registered
               (NOTE(review): it raises rather than returning None on failure)
        """
        # -------------------- Remove the old try/except block, because: -----------------------
        # if id_agent is None, getting this by throwing an exception will reduce performance, an if else check will be faster
        if id_agent in self._ID:
            # construct new sensor and agent and append to agents list
            self.construct_sensor(id_agent, definition, decdep=decdep)
            new_agent = Agent(self, id_agent, id_motivation, params)
            self._AGENTS[id_agent] = new_agent
            # NOTE(review): $service$ is not defined in this method -- it is
            # presumably a module-level handle; confirm it exists at runtime.
            ServiceWorld(service).add_agent(id_agent)
            return new_agent
        else:
            raise Exception("the agent " + id_agent + " is not registered!")
## Update the experiment state and output a summary of decisions
    def update_state(self, instruction='decide'):
        """
        Advance the experiment by one decision cycle and report diagnostics.

        :param instruction: 'decide' for autonomous operation, or a container
               of measurable ids used to override the agents' decisions
        :return: (time_elapsed, agent_reports), where agent_reports maps each
                 agent id to a dict of per-cycle diagnostics (activity,
                 enrichment stats, timings, decision, override, size, final)
        """
        #additional data reported to the experiment, by agent id:
        agent_reports={}
        #update initiation time:
        # NOTE(review): time.clock() was removed in Python 3.8; this file
        # appears to target Python 2 (xrange is used elsewhere).
        initial_time=time.clock()
        #purge the experiment's decision variable
        id_dec = 'decision'
        self.set_state(id_dec, [])
        ## Iterate over agents and decision-dependent measurables
        # - if id of an agent, then form a decision and append to id_dec
        # - if cid of an agent, then pass
        # - if id of a measurable, update its value
        for mid in (tmp_id for tmp_id in self._MID if self.dep(tmp_id)):
            midc = name_comp(mid)
            agentQ = mid in self._AGENTS or midc in self._AGENTS
            if agentQ:
                if mid in self._AGENTS: # if mid is an agent...
                    agent_reports[mid]={} #prepare a dictionary for agent's report
                    agent_start_time=time.clock()
                    ## agent activity set to current reading
                    agent = self._AGENTS[mid]
                    agent._ACTIVE = self.this_state(mid)
                    activity = 'plus' if agent._ACTIVE else 'minus'
                    agent_reports[mid]['activity']=activity #report active snapshot
                    ## ENRICHMENT
                    # agent analyzes outcomes of preceding decision
                    # cycle; if the prediction was too broad, add a
                    # sensor corresponding to the episode last observed
                    # and acknowledged by the active snapshot of the agent:
                    agent_reports[mid]['pred_too_general']=agent.report_current().subtract(agent.report_predicted()).weight()
                    if agent.report_current().subtract(agent.report_predicted()).weight() > 0:
                        new_episode = agent.report_last().intersect(agent.report_initmask())
                        sensors_to_be_added = [new_episode]
                    else:
                        sensors_to_be_added = []
                    sensors_not_to_be_removed = agent.ALL_FALSE()
                    ## PRUNING
                    # abduction over negligible sensors
                    # STEP 1. gather the negligible delayed sensors;
                    # ($x$ is negligible if $x<x^*$)
                    # STEP 2. cluster them;
                    # STEP 3. perform abduction over the clusters.
                    # abduction over delayed sensors implying an initial sensor
                    # STEP 1. Gather them
                    #
                    #init_downs = agent.close_downwards([agent.generate_signal([sid]) for sid in agent.report_initial()])
                    #delayed_downs = [sig.subtract(agent.report_initmask()) for sig in init_downs]
                    # STEP 2. Perform abduction
                    #new_masks = agent.perform_abduction(delayed_downs)
                    # Restructuring
                    # STEP 1. Add new delayed sensors:
                    #sensors_to_be_added.extend(new_masks)
                    #ENRICHMENT DONE HERE
                    agent.delay(sensors_to_be_added)
                    # Step 2. Remove old sensors:
                    #sensors_to_be_removed = agent.ALL_FALSE()
                    #for sig in delayed_downs:
                    # sensors_to_be_removed.add(sig)
                    #agent.pad_signal(sensors_to_be_removed)
                    #agent.prune(sensors_to_be_removed)
                    # RANDOM THOUGHTS:
                    # There needs to be a budget of delayed units, and we should merely
                    # be rewiring them....
                    # Increases in the budget should be prompted by a need for more units, that is:
                    # add more units only when "pruning" ends up demanding more resources than available.
                    #
                    ##THIS IS WHERE IT WOULD BE NICE TO HAVE AN ALTERNATIVE ARCHITECTURE
                    ##LEARNING THE SAME STRUCTURE OR AN APPROXIMATION THEREOF
                    ## compute and record e&p duration:
                    agent_e_and_p_ends=time.clock()
                    agent_reports[mid]['e_and_p_duration']=agent_e_and_p_ends-agent_start_time
                    ## agent enters observation-deliberation-decision stage and reports:
                    agent_reports[mid]['deliberateQ'] = agent.decide()
                    agent_reports[mid]['decision'] = mid if mid in self.this_state(id_dec) else midc
                    ## compute and report duration of decision cycle:
                    agent_decision_produced=time.clock()
                    agent_reports[mid]['decision_duration']=agent_decision_produced-agent_e_and_p_ends
                    ## report the agent size:
                    agent_reports[mid]['size']=max(agent._SIZE['plus'],agent._SIZE['minus'])
                    ## possible overriding instruction is considered
                    if instruction != 'decide':
                        if mid in instruction or midc in instruction:
                            # append override information to message
                            agent_reports[mid]['override']=mid if mid in instruction else midc
                            # replace last decision with provided instruction
                            self.this_state(id_dec).pop()
                            self.this_state(id_dec).append(mid if mid in instruction else midc)
                        else:
                            agent_reports[mid]['override']=''
                else:
                    # if midc is an agent, a decision has already been reached, so no action is required
                    pass
            else: # neither mid nor midc is an agent, so perform the value update
                try: # attempt update using definition
                    self.set_state(mid, self._DEFS[mid](self._STATE))
                except: # if no definition available, do nothing; this is a state variable evolving independently of the agent's actions, e.g., a pointer to a data structure.
                    pass
        # At this point, there is a complete decision vector
        action_signal = self.this_state(id_dec)
        for mid in self._MID:
            midc = name_comp(mid)
            agentQ = mid in self._AGENTS or midc in self._AGENTS
            depQ = self.dep(mid)
            if agentQ:
                # if mid is an agent (or its star)...
                try:
                    # try evaluating the definition for this mid
                    self.set_state(mid, (self._DEFS[mid](self._STATE)))
                    #report final decision for mid
                    if mid in self._AGENTS:
                        agent_reports[mid]['final'] = mid if self.this_state(mid) else midc
                except:
                    # if no definition, set the value according to $action_signal$
                    self.set_state(mid, (mid in action_signal))
            else:
                try:
                    # try updating using definition
                    self.set_state(mid, (self._DEFS[mid](self._STATE)))
                except:
                    # if no definition available then do nothing; this is a state variable evolving independently of the agent's actions, e.g., a pointer to a data structure.
                    pass
        final_time=time.clock()
        time_elapsed=final_time-initial_time
        return time_elapsed,agent_reports
class Agent(object):
### initialize an "empty" agent with prescribed learning parameters
def __init__(self, experiment, id_agent, id_motivation, params):
# Agent's ID and complementary ID:
self._ID = id_agent
self._CID = name_comp(id_agent)
# Agent's initialization parameters
self._PARAMS = params
# The experiment serving as the agent's environment
self._EXPERIMENT = experiment
# Agent's motivational signal:
self._MOTIVATION = id_motivation
# Flag denoting readiness state of the agent:
# - $False$ means initial sensors may still be added, only Python side is activated;
# - $True$ means agent has been constructed on CPP/GPU side, no initial sensors may be
# added beyond this point.
self._READY = | |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF recommendation model with TPU embedding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import threading
from absl import app as absl_app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib import tpu
import tpu_embedding
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
_TOP_K = 10 # Top-k list for evaluation
# keys for evaluation metrics
_HR_KEY = "HR"
_NDCG_KEY = "NDCG"
_NUM_EPOCHS = 15
# Bundle of per-mode graph handles consumed by run_graph(): the tf.Graph, the
# TPUEmbedding object, the TPU-loop runner, the infeed-thread factory and the
# pre/post-session hooks.
GraphSpec = collections.namedtuple("GraphSpec", [
    "graph", "embedding", "run_tpu_loop", "get_infeed_thread_fn", "hook_before",
    "hook_after"
])
def main(_):
  """Train NCF model and evaluate its hit rate (HR) metric."""
  resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu,
      zone=FLAGS.tpu_zone,
      project=FLAGS.gcp_project)
  master = resolver.master()

  ncf_dataset, cleanup_fn = data_preprocessing.instantiate_pipeline(
      dataset=FLAGS.dataset,
      data_dir=FLAGS.data_dir,
      # TODO(shizhiw): support multihost.
      batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      num_neg=FLAGS.num_neg,
      num_cycles=_NUM_EPOCHS,
      epochs_per_cycle=1,
      match_mlperf=FLAGS.ml_perf,
      use_subprocess=FLAGS.use_subprocess,
      cache_id=FLAGS.cache_id)

  train_params, eval_params = create_params(ncf_dataset)

  # The evaluation graph is static across epochs, so it is built once.
  eval_graph_spec = build_graph(
      eval_params, ncf_dataset, tpu_embedding.INFERENCE)

  for epoch in range(_NUM_EPOCHS):
    tf.logging.info("Training {}...".format(epoch))
    # build training graph each epoch as number of batches per epoch
    # i.e. batch_count might change by 1 between epochs.
    train_graph_spec = build_graph(
        train_params, ncf_dataset, tpu_embedding.TRAINING)
    run_graph(master, train_graph_spec, epoch)

    tf.logging.info("Evaluating {}...".format(epoch))
    run_graph(master, eval_graph_spec, epoch)

  cleanup_fn()  # Cleanup data construction artifacts and subprocess.
def create_params(ncf_dataset):
  """Create params for the model.

  Returns a (train_params, eval_params) pair of dicts that share all model
  hyper-parameters and differ only in "batch_size".
  """
  shared = {
      "learning_rate": FLAGS.learning_rate,
      "num_users": ncf_dataset.num_users, # 138493 for 20m, 6040 for 1m.
      "num_items": ncf_dataset.num_items, # 26744 for 20m
      "mf_dim": FLAGS.num_factors,
      "model_layers": [int(layer) for layer in FLAGS.layers],
      "mf_regularization": FLAGS.mf_regularization,
      "mlp_reg_layers": [float(reg) for reg in FLAGS.mlp_regularization],
      "use_tpu": True,
      "beta1": FLAGS.beta1,
      "beta2": FLAGS.beta2,
      "epsilon": FLAGS.epsilon,
      "model_dir": FLAGS.model_dir,
  }
  # Shallow copies, each with its own batch size.
  train_params = dict(shared, batch_size=FLAGS.batch_size)
  eval_params = dict(shared, batch_size=FLAGS.eval_batch_size)
  return train_params, eval_params
def run_graph(master, graph_spec, epoch):
  """Run graph_spec.graph with master.

  Initializes the TPU system with the embedding config, runs the before
  hook, starts the infeed thread, drives the TPU loop, joins the infeed
  thread, then runs the after hook.
  """
  tf.logging.info("Running graph for epoch {}...".format(epoch))
  with tf.Session(master, graph_spec.graph) as sess:
    tf.logging.info("Initializing system for epoch {}...".format(epoch))
    init_op = tpu.initialize_system(
        embedding_config=graph_spec.embedding.config_proto)
    sess.run(init_op)

    tf.logging.info("Running before hook for epoch {}...".format(epoch))
    graph_spec.hook_before(sess, epoch)

    tf.logging.info("Running infeed for epoch {}...".format(epoch))
    infeed_thread = threading.Thread(
        target=graph_spec.get_infeed_thread_fn(sess))
    tf.logging.info("Staring infeed thread...")
    infeed_thread.start()

    tf.logging.info("Running TPU loop for epoch {}...".format(epoch))
    graph_spec.run_tpu_loop(sess, epoch)

    tf.logging.info("Joining infeed thread...")
    infeed_thread.join()

    tf.logging.info("Running after hook for epoch {}...".format(epoch))
    graph_spec.hook_after(sess, epoch)
def build_graph(params, ncf_dataset, mode):
  """Build graph_spec with graph and some useful handles.

  Constructs a fresh tf.Graph containing the embedding, infeed and TPU loop
  for the given mode (tpu_embedding.TRAINING or tpu_embedding.INFERENCE) and
  returns them bundled in a GraphSpec.
  """
  tf.logging.info("building graph for mode {}.".format(mode))
  with tf.Graph().as_default() as graph:
    embedding = get_embedding(params, mode)
    tf.logging.info("tpu_embedding_config_proto: {}."
                    .format(embedding.config_proto))
    if mode == tpu_embedding.INFERENCE:
      # Eval batches must shard evenly across cores in groups of
      # (1 positive + NUM_EVAL_NEGATIVES) examples.
      assert (params["batch_size"] % (embedding.num_cores *
                                      (1 + rconst.NUM_EVAL_NEGATIVES))) == 0
    input_fn, train_record_dir, batch_count = data_preprocessing.make_input_fn(
        ncf_dataset=ncf_dataset, is_training=(mode == tpu_embedding.TRAINING))
    get_infeed_thread_fn, infeed_queue = (
        build_infeed(input_fn, params, batch_count, embedding, mode))
    tpu_loop = build_tpu_loop(infeed_queue, params, batch_count, embedding,
                              mode)
    def run_tpu_loop(sess, epoch):
      # Training only runs the loop; inference also aggregates the HR metric
      # from the per-core (total, count) accumulators.
      if mode == tpu_embedding.TRAINING:
        sess.run(tpu_loop)
      else:
        total_values, count_values = (sess.run(tpu_loop))
        hr = np.sum(total_values) / np.sum(count_values)
        tf.logging.info("HR = {} after epoch {}.".format(hr, epoch))
    hook_before, hook_after = build_hooks(
        mode, embedding, params, train_record_dir)
  return GraphSpec(graph, embedding, run_tpu_loop, get_infeed_thread_fn,
                   hook_before, hook_after)
def build_infeed(input_fn, params, batch_count, embedding, mode):
  """Build infeed.

  Returns (get_infeed_thread_fn, infeed_queue): the former produces a thread
  body that enqueues `batch_count` batches into both the TPU embedding queues
  and the dense infeed queue; the latter carries labels (training, int32) or
  duplicate masks (inference, float32).
  """
  # The dense queue carries int32 labels for training, float32 duplicate
  # masks for inference; shape is identical in both modes.
  if mode == tpu_embedding.TRAINING:
    infeed_queue = tpu.InfeedQueue(
        tuple_types=[tf.int32], tuple_shapes=[[params["batch_size"], 1]])
    infeed_queue.set_number_of_shards(embedding.num_cores)
  else:
    infeed_queue = tpu.InfeedQueue(
        tuple_types=[tf.float32], tuple_shapes=[[params["batch_size"], 1]])
    infeed_queue.set_number_of_shards(embedding.num_cores)
  def enqueue_ops_fn():
    """Create enqueue ops."""
    ds = input_fn(params)
    iterator = ds.make_one_shot_iterator()
    if mode == tpu_embedding.TRAINING:
      features, labels = iterator.get_next()
    else:
      features = iterator.get_next()
    # TODO(shizhiw): speed up input pipeline by avoiding splitting and
    # sparse tensor.
    # TPU embedding enqueue.
    users = features[movielens.USER_COLUMN]
    items = features[movielens.ITEM_COLUMN]
    sparse_features_list = []
    # Split the host batch into one slice per core; each slice is wrapped as
    # single-id-per-row SparseTensors keyed by embedding feature name.
    users_per_core_list = tf.split(users,
                                   embedding.num_cores_per_host)
    items_per_core_list = tf.split(items,
                                   embedding.num_cores_per_host)
    for j in range(embedding.num_cores_per_host):
      users_sparse = tf.SparseTensor(
          indices=[[i, 0] for i in range(
              embedding.batch_size_per_core)],
          values=users_per_core_list[j],
          dense_shape=[embedding.batch_size_per_core, 1])
      items_sparse = tf.SparseTensor(
          indices=[[i, 0] for i in range(
              embedding.batch_size_per_core)],
          values=items_per_core_list[j],
          dense_shape=[embedding.batch_size_per_core, 1])
      sparse_features = {
          "mf_user": users_sparse,
          "mlp_user": users_sparse,
          "mf_item": items_sparse,
          "mlp_item": items_sparse,
      }
      sparse_features_list.append(sparse_features)
    enqueue_ops = embedding.generate_enqueue_ops(
        sparse_features_list)
    # TPU dense enqueue.
    if mode == tpu_embedding.TRAINING:
      # Infeed does not support bool.
      labels = tf.cast(labels, tf.int32)
      enqueue_ops.extend(
          infeed_queue.split_inputs_and_generate_enqueue_ops([labels]))
    else:
      duplicate_mask = tf.cast(features[rconst.DUPLICATE_MASK], tf.float32)
      enqueue_ops.extend(
          infeed_queue.split_inputs_and_generate_enqueue_ops([duplicate_mask]))
    return enqueue_ops
  if len(embedding.hosts) != 1:
    raise ValueError("len(embedding.hosts) should be 1, but got {}."
                     .format(embedding.hosts))
  # TODO(shizhiw): check enqueue op location in tpu_embedding.py as user
  # might fail to specify device for enqueue ops.
  with tf.device(embedding.hosts[0]):
    wrapped_enqueue_ops = wrap_computation_in_while_loop(
        enqueue_ops_fn, n=batch_count, parallel_iterations=1)
  def get_infeed_thread_fn(sess):
    def infeed_thread_fn():
      tf.logging.info("Enqueueing...")
      sess.run(wrapped_enqueue_ops)
    return infeed_thread_fn
  return get_infeed_thread_fn, infeed_queue
def build_tpu_loop(infeed_queue, params, batch_count, embedding, mode):
  """Build op to run loops on TPU.

  Args:
    infeed_queue: `tpu.InfeedQueue` carrying labels (training) or duplicate
      masks (inference) to each core.
    params: model hyper-parameter dict (see create_params).
    batch_count: number of batches to run in the loop.
    embedding: `TPUEmbedding` object providing activations/gradient ops.
    mode: `tpu_embedding.TRAINING` or `tpu_embedding.INFERENCE`.

  Returns:
    For training, a sharded op that runs `batch_count` optimization steps.
    For inference, sharded (total, count) accumulators from which the HR
    metric is computed.
  """
  if mode == tpu_embedding.TRAINING:
    def tpu_step_fn(labels):
      """Create one step in training."""
      logits = logits_fn(embedding, params)
      # BUG FIX: these branches were inverted -- `--lazy_adam` must select
      # LazyAdamOptimizer, consistent with `lazy_adam=FLAGS.lazy_adam`
      # passed to tpu_embedding.AdamParameters in get_embedding().
      if FLAGS.lazy_adam:
        optimizer = tf.contrib.opt.LazyAdamOptimizer(
            learning_rate=params["learning_rate"],
            beta1=params["beta1"],
            beta2=params["beta2"],
            epsilon=params["epsilon"])
      else:
        optimizer = tf.train.AdamOptimizer(
            learning_rate=params["learning_rate"],
            beta1=params["beta1"],
            beta2=params["beta2"],
            epsilon=params["epsilon"])
      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
      # Softmax with the first column of ones is equivalent to sigmoid.
      softmax_logits = tf.concat(
          [tf.ones(logits.shape, dtype=logits.dtype), logits], axis=1)
      loss = tf.losses.sparse_softmax_cross_entropy(
          labels=labels, logits=softmax_logits)
      minimize_op = optimizer.minimize(loss)
      # Embedding gradients are sent only after the dense update completes.
      with tf.control_dependencies([minimize_op]):
        send_gradient_op = embedding.generate_send_gradients_op()
      return send_gradient_op
    def tpu_loop_fn():
      return tpu.repeat(batch_count, tpu_step_fn, infeed_queue=infeed_queue)
    tpu_loop = tpu.shard(tpu_loop_fn, num_shards=embedding.num_cores)
    return tpu_loop
  else:
    def tpu_step_fn(total, count, duplicate_mask):
      """One step in evaluation."""
      logits = logits_fn(embedding, params)
      in_top_k, _, metric_weights, _ = neumf_model.compute_top_k_and_ndcg(
          logits, duplicate_mask, FLAGS.ml_perf)
      metric_weights = tf.cast(metric_weights, tf.float32)
      # Accumulate weighted hits and weights; HR = sum(total) / sum(count).
      total += tf.reduce_sum(tf.multiply(in_top_k, metric_weights))
      count += tf.reduce_sum(metric_weights)
      return total, count
    inputs = [tf.constant(0.), tf.constant(0.)]
    def tpu_loop_fn():
      return tpu.repeat(
          batch_count, tpu_step_fn, inputs, infeed_queue=infeed_queue)
    tpu_loop = tpu.shard(tpu_loop_fn, num_shards=embedding.num_cores)
    return tpu_loop
def build_hooks(mode, embedding, params, train_record_dir):
  """Build `hook_before` and `hook_after` for `graph_spec`.

  Training hooks initialize/restore variables before the loop and save a
  checkpoint after it; inference hooks restore the epoch's checkpoint.
  """
  saver = tf.train.Saver()

  def ckpt_path_for(epoch):
    # Checkpoint naming shared by save and restore.
    return "{}/model.ckpt.{}".format(params["model_dir"], epoch)

  if mode == tpu_embedding.TRAINING:
    def hook_before(sess, epoch):
      # The first epoch starts from fresh variables; later epochs resume
      # from the previous epoch's checkpoint.
      if epoch == 0:
        sess.run(tf.global_variables_initializer())
      else:
        saver.restore(sess, ckpt_path_for(epoch - 1))
      sess.run(embedding.init_ops)

    def hook_after(sess, epoch):
      sess.run(embedding.retrieve_parameters_ops)
      saved_path = saver.save(sess, ckpt_path_for(epoch))
      tf.logging.info("Model saved in path: {}."
                      .format(saved_path))
      # must delete; otherwise the first epoch's data will always be used.
      tf.gfile.DeleteRecursively(train_record_dir)
  else:
    def hook_before(sess, epoch):
      saver.restore(sess, ckpt_path_for(epoch))
      sess.run(embedding.init_ops)

    def hook_after(sess, epoch):
      del sess, epoch

  return hook_before, hook_after
def get_embedding(params, mode):
  """Create `TPUEmbedding` object.

  Builds the four embedding tables (MF/MLP x user/item) and, for training,
  the Adam optimization parameters mirroring the dense optimizer flags.
  """
  initializer = tf.random_normal_initializer(0., 0.01)
  mlp_dim = params["model_layers"][0]//2

  def make_table(vocabulary_size, dimension):
    # All tables share the initializer and the "sum" combiner.
    return tpu_embedding.TableConfig(
        vocabulary_size=vocabulary_size,
        dimension=dimension,
        initializer=initializer, combiner="sum")

  table_to_config_dict = {
      "mf_user": make_table(params["num_users"], params["mf_dim"]),
      "mlp_user": make_table(params["num_users"], mlp_dim),
      "mf_item": make_table(params["num_items"], params["mf_dim"]),
      "mlp_item": make_table(params["num_items"], mlp_dim),
  }
  # Each feature reads from the identically named table.
  feature_to_table_dict = {name: name for name in table_to_config_dict}

  if mode == tpu_embedding.TRAINING:
    optimization_parameters = tpu_embedding.AdamParameters(
        params["learning_rate"], beta1=params["beta1"], beta2=params["beta2"],
        epsilon=params["epsilon"],
        lazy_adam=FLAGS.lazy_adam,
        sum_inside_sqrt=FLAGS.adam_sum_inside_sqrt,
        use_gradient_accumulation=FLAGS.use_gradient_accumulation,
        pipeline_execution_with_tensor_core=(
            FLAGS.pipeline_execution_with_tensor_core))
  else:
    optimization_parameters = None

  return tpu_embedding.TPUEmbedding(
      table_to_config_dict,
      feature_to_table_dict,
      params["batch_size"],
      num_hosts=1,
      mode=mode,
      optimization_parameters=optimization_parameters)
def logits_fn(embedding, params):
  """Calculate the NCF rating logits from TPU embedding activations.

  Combines a GMF branch (element-wise product of the MF user/item
  embeddings) with an MLP branch (dense tower over the concatenated MLP
  user/item embeddings), then projects the concatenation of both
  branches down to a single logit.

  Args:
    embedding: A `TPUEmbedding` whose activations provide the
      "mf_user", "mf_item", "mlp_user" and "mlp_item" features.
    params: Hyperparameter dict; reads "model_layers" and
      "mlp_reg_layers".

  Returns:
    The rating logits tensor.

  Raises:
    ValueError: If the first entry of "model_layers" is odd.
  """
  activations = embedding.get_activations()
  # TODO(shizhiw): support one feature to multiple tables in tpu_embedding.py.
  mf_user_input = tf.keras.layers.Input(tensor=activations["mf_user"])
  mf_item_input = tf.keras.layers.Input(tensor=activations["mf_item"])
  mlp_user_input = tf.keras.layers.Input(tensor=activations["mlp_user"])
  mlp_item_input = tf.keras.layers.Input(tensor=activations["mlp_item"])
  model_layers = params["model_layers"]
  mlp_reg_layers = params["mlp_reg_layers"]
  if model_layers[0] % 2 != 0:
    raise ValueError("The first layer size should be multiple of 2!")
  # GMF branch: element-wise multiply of the MF embeddings.
  mf_vector = tf.keras.layers.multiply([mf_user_input, mf_item_input])
  # MLP branch: dense tower over the concatenated MLP embeddings.
  mlp_vector = tf.keras.layers.concatenate([mlp_user_input, mlp_item_input])
  # Layer 0 is the input width; layers 1..N-1 are the hidden layers.
  for idx in range(1, len(model_layers)):
    hidden = tf.keras.layers.Dense(
        model_layers[idx],
        kernel_regularizer=tf.keras.regularizers.l2(mlp_reg_layers[idx]),
        activation="relu")
    mlp_vector = hidden(mlp_vector)
  # Fuse the GMF and MLP branches and project to one rating logit.
  predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector])
  return tf.keras.layers.Dense(
      1, activation=None, kernel_initializer="lecun_uniform",
      name=movielens.RATING_COLUMN)(predict_vector)
def wrap_computation_in_while_loop(op_fn, n, parallel_iterations=10):
  """Run the ops produced by `op_fn` `n` times inside a `tf.while_loop`.

  Args:
    op_fn: Zero-argument callable returning an op or a list of ops.
    n: Number of iterations to execute.
    parallel_iterations: Forwarded to `tf.while_loop`.

  Returns:
    The `tf.while_loop` op; its completion implies `n` executions.
  """
  def body(step):
    # Re-invoke op_fn each iteration and gate the counter increment on
    # those ops so they actually execute before the next step.
    produced = op_fn()
    deps = produced if isinstance(produced, list) else [produced]
    with tf.control_dependencies(deps):
      return step + 1
  return tf.while_loop(
      lambda step: tf.less(step, n),
      body, [tf.constant(0)],
      parallel_iterations=parallel_iterations)
def define_ncf_flags():
"""Add flags for running ncf_main."""
flags.DEFINE_enum(
name="dataset", default="ml-20m",
enum_values=["ml-1m", "ml-20m"], case_sensitive=False,
help=flags_core.help_wrap(
"Dataset to be trained and evaluated."))
flags.DEFINE_string(
"data_dir", default=None,
help=("The directory where movielens data is stored."))
flags.DEFINE_integer(
"batch_size", default=2048*16, help="Batch size.")
flags.DEFINE_string(
"model_dir", default=None,
help=("The directory where the model and summaries are stored."))
flags.DEFINE_string(
"tpu", default=None,
help="The Cloud TPU to use for | |
#####################################################
# Title: Malware sandbox-data classifier (Tor-related samples)
# Author: <NAME> (<EMAIL>)
# Licence: GPLv2
#####################################################
#!/usr/bin/python
import sys
import sqlite3
import datetime
import timeit
import math
import re
import pandas as pd
import numpy as np
from time import time, sleep
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_validate, train_test_split
#from sklearn.naive_bayes import *
from sklearn.metrics import roc_auc_score, balanced_accuracy_score, precision_recall_curve, classification_report, precision_recall_fscore_support, roc_curve, average_precision_score, auc, confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2, VarianceThreshold
from sklearn.tree import DecisionTreeClassifier, export_text, export_graphviz
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn import tree
from mglearn import make_blobs
import matplotlib.pyplot as plt
import graphviz
'''
OPEN UP DATABASE AND FETCH DATA
'''
def connect_to_database(action, training_db, urls, unknown_samples, sha256):
    """Fetch per-sample features from the training DB and classify them.

    For every sample (keyed by SHA256), registry-key paths, DNS queries,
    API calls and loaded DLLs are read from the sqlite database and folded
    into text features.  Samples labelled 0/1 form the training set; label
    2 marks prediction candidates.  Classification is delegated to
    `build_classifiers`, and, when requested, unique .onion URLs are
    extracted from the strings of samples predicted to be Tor-related.

    Args:
        action: when falsy, perform the fetch + classification; a truthy
            value skips all work (preserved from the original design).
        training_db: path to the sqlite training-set database.
        urls: when truthy, also print unique .onion URLs found in the
            strings of predicted samples.
        unknown_samples: unused here; kept for interface compatibility.
        sha256: unused here; kept for interface compatibility.
    """
    # Use None (not "") so the `finally` clause can tell whether a
    # connection was actually opened before attempting to close it.
    training_db_connection = None
    if not action:
        try:
            # Connect to training set database
            training_db_connection = sqlite3.connect(str(training_db))
            training_db_cursor = training_db_connection.cursor()
            # Driving query: one row per (sample sha256, registry key path).
            training_db_cursor.execute("SELECT sha256, path FROM reg_keys;")
            reg_key_pairs = training_db_cursor.fetchall()
            reg_keys_dict = {}
            unknown_samples_dict = {}
            cur_sha = ""
            reg_keys_list = []
            tor_related = int(0)
            counter = 0
            counter_length = len(reg_key_pairs)
            reg_keys_combined = {}
            unknown_samples_combined = {}
            print("Fetching data from database. Processing.")
            for pair in reg_key_pairs:
                counter += 1
                # Print progress
                if counter % 100 == 0:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                for fraction, label in ((0.1, "10%"), (0.2, "20%"),
                                        (0.5, "50%"), (0.7, "70%"),
                                        (0.8, "80%"), (0.9, "90%"),
                                        (0.95, "95%")):
                    if counter == math.ceil(fraction * counter_length):
                        print(label)
                if cur_sha != pair[0]:
                    # New sample encountered: reset per-sample accumulators.
                    # NOTE(review): the sample's first reg key is dropped
                    # here (nothing is appended in this branch) -- behavior
                    # preserved as-is; confirm whether that is intended.
                    cur_sha = pair[0]
                    reg_keys_list = []
                else:
                    reg_keys_list.append(pair[1])
                # Parameterized queries prevent SQL injection through
                # crafted sha256 values (was string concatenation).
                training_db_cursor.execute(
                    "SELECT dns FROM network WHERE sha256=?;", (cur_sha,))
                dns_list = training_db_cursor.fetchall()
                training_db_cursor.execute(
                    "SELECT name,tor_related FROM api_calls WHERE sha256=?;",
                    (cur_sha,))
                api_list = training_db_cursor.fetchall()
                training_db_cursor.execute(
                    "SELECT name FROM dlls WHERE sha256=?;", (cur_sha,))
                dll_list = training_db_cursor.fetchall()
                training_db_cursor.execute(
                    "SELECT tor_related FROM label WHERE sha256=?;",
                    (cur_sha,))
                class_label = training_db_cursor.fetchall()
                # Flatten the fetched rows into strings for the
                # bag-of-words features.
                api_string = "".join(str(api_list))
                reg_keys_string = "".join(str(reg_keys_list))
                dns_string = "".join(str(dns_list))
                dll_string = "".join(str(dll_list))
                # Single combined text feature; fixes the original's
                # missing " " separator between dns and dll strings in the
                # label-2 branch (now consistent with labels 0/1).
                combined = (reg_keys_string + " " + dns_string + " " +
                            dll_string + " " + api_string)
                # If 1 or 0, samples are correctly classified. 2 are
                # prediction candidates.
                if class_label:
                    if 0 in class_label[0]:
                        tor_related = int(0)
                        reg_keys_dict.update({cur_sha: [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        reg_keys_combined.update({cur_sha: [combined, tor_related]})
                    if 1 in class_label[0]:
                        tor_related = int(1)
                        reg_keys_dict.update({cur_sha: [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        reg_keys_combined.update({cur_sha: [combined, tor_related]})
                    if 2 in class_label[0]:
                        tor_related = int(2)
                        unknown_samples_dict.update({cur_sha: [reg_keys_string, dns_string, dll_string, api_string, tor_related]})
                        unknown_samples_combined.update({cur_sha: [combined, tor_related]})
            # Construct data frames from the feature dictionaries
            training_df2 = pd.DataFrame(reg_keys_dict).T
            training_df3 = pd.DataFrame(reg_keys_combined).T
            # Construct a data frame for the unknown sample to be classified
            # as well
            unknown_df2 = pd.DataFrame(unknown_samples_dict).T
            unknown_df3 = pd.DataFrame(unknown_samples_combined).T
            predictions_SHA256_list = build_classifiers(
                training_df2, training_df3, unknown_df2, unknown_df3)
            # If URLs flag enabled, go fetch URLs
            if urls:
                unique_onion_urls = []
                print("|-- Tor Malware\n", predictions_SHA256_list)
                for prediction_SHA256 in predictions_SHA256_list:
                    training_db_cursor.execute(
                        "SELECT strings FROM strings WHERE sha256=?;",
                        (prediction_SHA256,))
                    predicted_strings = training_db_cursor.fetchall()
                    # Find .onion URL
                    for onion_url in predicted_strings:
                        for string in onion_url:
                            tmp_list = re.findall(
                                r"(?<=\.)([^.]+)(?:\.(?:onion|[^.]+(?:$|\n)))",
                                string)
                            for i in tmp_list:
                                if i not in unique_onion_urls:
                                    unique_onion_urls.append(i)
                print("|--- Onion URLs \n", unique_onion_urls)
            # Close DB connection
            training_db_connection.commit()
            training_db_connection.close()
        except sqlite3.Error as err:
            print("Sqlite error:", err)
        finally:
            # Guard: connect() may have failed, leaving no connection to
            # close (the original called .close() on a "" sentinel here).
            if training_db_connection is not None:
                training_db_connection.close()
"""
BUILD CLASSIFICATION MODELS
"""
def build_classifiers(df2, df3, unknown_df2, unknown_df3):
# Create bag of words for label:
vect = CountVectorizer(lowercase=False)
vect.fit_transform(df3[0])
X = vect.transform(df3[0])
# If there are unknown samples, make predictions on them.
X_unknown = vect.transform(unknown_df3[0])
# unknown_samples_SHA256 = df3[0].index
#X = pd.DataFrame(X_cand, columns=vect.get_feature_names())
# Target/class labels
y = df2[4]
y = y.astype('int')
# Feature selection
selector = VarianceThreshold(threshold=12)
selector.fit_transform(X)
# 80 / 20 split training and testing data. Shuffle just in case.
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=True, test_size=0.2)
y_train = y_train.astype('int')
y_test = y_test.astype('int')
# Naive Bayes
mnb = MultinomialNB()
nb_clf = mnb.fit(X_train.toarray(), y_train.to_numpy())
mnb_prediction = nb_clf.predict(X_test.toarray())
mnb_proba = nb_clf.predict_proba(X_test)[:, 1]
mnb_cross_validation_scores = cross_validate(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring=["accuracy", "f1", "recall", "precision", "roc_auc"], n_jobs=-1, return_train_score=True)
mnb_cross_validation_score = cross_val_score(nb_clf, X_test.toarray(), y_test.to_numpy(), cv=5, scoring="accuracy")
mnb_roc_auc_avg = roc_auc_score(y_test, mnb_prediction)
mnb_balanced_accuracy = balanced_accuracy_score(y_test, mnb_prediction)
mnb_precision, mnb_recall, mnb_threshold = precision_recall_curve(y_test, nb_clf.predict(X_test.toarray()))
mnb_fpr = dict()
mnb_tpr = dict()
mnb_roc_auc = dict()
mnb_fpr[0], mnb_tpr[0], _ = roc_curve(y_test, mnb_proba)
mnb_roc_auc[0] = auc(mnb_fpr[0], mnb_tpr[0])
# Compute micro-average ROC curve and ROC area
mnb_fpr["micro"], mnb_tpr["micro"], _ = roc_curve(y_test.ravel(), mnb_proba.ravel())
mnb_roc_auc["micro"] = auc(mnb_fpr["micro"], mnb_tpr["micro"])
print("\n | ---- MNB cross validation score: ", mnb_cross_validation_score.mean())
print(classification_report(y_test, mnb_prediction))
# Support Vector Machine
clf = svm.SVC(C=2, cache_size=9000, probability=True).fit(X_train, y_train)
svm_proba = clf.predict_proba(X_test)[:, 1]
svm_prediction = clf.predict(X_test)
svm_unknown_sample_predicition = clf.predict(X_unknown)
svm_y_score = clf.decision_function(X_test)
svm_roc_auc_avg = roc_auc_score(y_test, svm_prediction)
svm_cross_validation_scores = cross_validate(clf, X_test, y_test, cv=5, scoring=["accuracy", "balanced_accuracy","precision","f1","recall","roc_auc"], return_train_score=True)
svm_cross_validation_score = cross_val_score(clf, X_test, y_test, cv=5, scoring="accuracy")
svm_precision, svm_recall, svm_threshold = precision_recall_curve(y_test, clf.decision_function(X_test))
svm_close_zero = np.argmin(np.abs(svm_threshold))
svm_fpr = dict()
svm_tpr = dict()
svm_roc_auc = dict()
#svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_prediction)
svm_fpr[0], svm_tpr[0], _ = roc_curve(y_test, svm_proba)
#svm_fpr[1], svm_tpr[1], _ = roc_curve(y_test[:,1], svm_y_score[:, 1])
svm_roc_auc[0] = auc(svm_fpr[0], svm_tpr[0])
# Compute micro-average ROC curve and ROC area
svm_fpr["micro"], svm_tpr["micro"], _ = roc_curve(y_test.ravel(), svm_proba.ravel())
svm_roc_auc["micro"] = auc(svm_fpr["micro"], svm_tpr["micro"])
print("\n\n|---- SVM 10-fold cross validation accuracy score:{}".format(np.mean(svm_cross_validation_score)))
# Logistic regression classifier
logreg = LogisticRegression(max_iter=4000).fit(X_train, y_train)
lr_prediction = logreg.predict(X_test)
lr_unknown_predictions = logreg.predict(X_unknown)
lr_proba = logreg.predict_proba(X_test)[:, 1]
lr_decision_function = logreg.decision_function(X_test)
lr_cross_validation_scores = cross_validate(logreg, X_test, y_test, cv=5 , scoring=["accuracy", "balanced_accuracy", "precision", "f1", "recall","roc_auc"], n_jobs=-1, return_train_score=True)
lr_cross_validation_score = cross_val_score(logreg, X_test, y_test, cv=5 , scoring="accuracy")
lr_roc_auc = roc_auc_score(y_test, lr_prediction)
lr_fpr = dict()
lr_tpr = dict()
lr_roc_auc = dict()
lr_fpr[0], lr_tpr[0], _ = roc_curve(y_test, lr_proba)
lr_roc_auc[0] = auc(lr_fpr[0], lr_tpr[0])
lr_fpr["micro"], lr_tpr["micro"], _ = roc_curve(y_test.ravel(), lr_proba.ravel())
lr_roc_auc["micro"] = auc(lr_fpr["micro"], lr_tpr["micro"])
average_precision = average_precision_score(y_test, lr_decision_function)
precision, recall, threshold = precision_recall_curve(y_test, lr_decision_function)
precision1, recall1, f1, supp = precision_recall_fscore_support(y_test, lr_prediction, average="weighted", zero_division=1)
print("\n\n|---- LR 10-fold cross validation accuracy score:{}".format(np.mean(lr_cross_validation_score)))
print(classification_report(y_test, lr_prediction, zero_division=1))
# Random forest classifier
rf_clf = RandomForestClassifier(max_depth=2, random_state=0)
rf_clf.fit(X_train, y_train)
rf_prediction = rf_clf.predict(X_test)
rf_unknown_prediction = rf_clf.predict(X_unknown)
rf_proba = rf_clf.predict_proba(X_test)[:, 1]
rf_fpr = dict()
rf_tpr = dict()
rf_roc_auc = dict()
rf_fpr[0], rf_tpr[0], _ = roc_curve(y_test, rf_prediction)
rf_roc_auc[0] = auc(rf_fpr[0], rf_tpr[0])
rf_fpr["micro"], rf_tpr["micro"], _ = roc_curve(y_test.ravel(), rf_prediction.ravel())
rf_roc_auc["micro"] = auc(rf_fpr["micro"], rf_tpr["micro"])
rf_precision, rf_recall, rf_threshold = precision_recall_curve(y_test, rf_prediction)
rf_cross_validation_score = cross_val_score(rf_clf, X_test, y_test, cv=5 , scoring="accuracy")
print("\n\n|---- RF 10-fold cross validation accuracy score: {}", rf_cross_validation_score.mean())
print(classification_report(y_test,rf_prediction))
# Decision tree classifier
dt_clf = DecisionTreeClassifier()
dt_clf.fit(X_train, y_train)
dt_prediction = dt_clf.predict(X_test)
dt_unknown_prediction = dt_clf.predict(X_unknown)
dt_proba = dt_clf.predict_proba(X_test)[:, 1]
dt_fpr = dict()
dt_tpr = dict()
dt_roc_auc = dict()
dt_fpr[0], dt_tpr[0], _ = roc_curve(y_test, dt_prediction)
dt_roc_auc[0] = auc(dt_fpr[0], dt_tpr[0])
dt_fpr["micro"], dt_tpr["micro"], _ = roc_curve(y_test.ravel(), dt_prediction.ravel())
dt_roc_auc["micro"] = auc(dt_fpr["micro"], dt_tpr["micro"])
dt_precision, dt_recall, dt_threshold = precision_recall_curve(y_test, dt_prediction)
dt_cross_validation_score = cross_val_score(dt_clf, X_test, y_test, cv=5 , scoring="accuracy")
print("\n\n|---- DT 10-fold cross validation accuracy score:{} ", dt_cross_validation_score.mean())
print("\nDT score: ", dt_clf.score(X_test, y_test), "\nDT classification report\n\n", classification_report(y_test, dt_prediction), export_text(dt_clf, show_weights=True))
print("DT y_predictions: ", dt_prediction, "y_test: ", y_test)
# Verify predictions with the true labels
verified_predictions_SHA256_list = verify_predictions(dt_prediction, y_test)
# Unseen samples predictions
"""
# Draw AuC RoC
roc_plt = plt
roc_plt.figure()
lw = 2
roc_plt.plot(svm_fpr[0], svm_tpr[0], color='red', lw=lw, label='Support vector machine ROC curve (area = %0.2f)' % svm_roc_auc[0])
roc_plt.plot(lr_fpr[0], lr_tpr[0], color='yellow', lw=lw, label='Logistic regression ROC curve (area = %0.2f)' % lr_roc_auc[0])
roc_plt.plot(mnb_fpr[0], mnb_tpr[0], color='green', lw=lw, label='Multinomial naive Bayes ROC curve (area = %0.2f)' % mnb_roc_auc[0])
roc_plt.plot(rf_fpr[0], | |
# File: django_evolution/tests/test_change_field.py
from __future__ import unicode_literals
from django.db import connection, models
from django_evolution.db import EvolutionOperationsMulti
from django_evolution.diff import Diff
from django_evolution.errors import SimulationFailure
from django_evolution.mutations import ChangeField
from django_evolution.mutators import AppMutator
from django_evolution.signature import (AppSignature,
ModelSignature,
ProjectSignature)
from django_evolution.tests.base_test_case import EvolutionTestCase
class ChangeSequenceFieldInitial(object):
    """Callable initial-value provider for ChangeField tests.

    Instances are passed as a dynamic ``initial=`` value; calling one
    returns the database-quoted name of the ``char_field`` column.
    """

    def __init__(self, suffix):
        """Store the suffix identifying the owning test scenario."""
        self.suffix = suffix

    def __call__(self):
        """Return the quoted ``char_field`` column name."""
        quoted_name = connection.ops.quote_name('char_field')
        return quoted_name
class ChangeAnchor1(models.Model):
    # Minimal anchor model used as the target of the M2M fields in the
    # ChangeField test models below.
    value = models.IntegerField()
class ChangeBaseModel(models.Model):
    # Baseline model every ChangeField test evolves away from; each test's
    # DestModel copies these fields and alters exactly one attribute.
    my_id = models.AutoField(primary_key=True)
    alt_pk = models.IntegerField()
    # Fields below cover the mutable attributes under test: db_column,
    # db_index, unique, max_length, null, and M2M db_table.
    int_field = models.IntegerField(db_column='custom_db_column')
    int_field1 = models.IntegerField(db_index=True)
    int_field2 = models.IntegerField(db_index=False)
    int_field3 = models.IntegerField(unique=True)
    int_field4 = models.IntegerField(unique=False)
    char_field = models.CharField(max_length=20)
    char_field1 = models.CharField(max_length=25, null=True)
    char_field2 = models.CharField(max_length=30, null=False)
    m2m_field1 = models.ManyToManyField(
        ChangeAnchor1, db_table='change_field_non-default_m2m_table')
class ChangeFieldTests(EvolutionTestCase):
    """Testing ChangeField mutations."""

    # Key used to look up this suite's expected SQL in the per-database
    # test fixtures.
    sql_mapping_key = 'change_field'
    # Model state each test evolves away from.
    default_base_model = ChangeBaseModel
    default_extra_models = [
        ('ChangeAnchor1', ChangeAnchor1),
    ]
    def test_with_bad_app(self):
        """Testing ChangeField with application not in signature"""
        mutation = ChangeField('TestModel', 'char_field1')
        message = (
            'Cannot change the field "char_field1" on model '
            '"badapp.TestModel". The application could not be found in the '
            'signature.'
        )
        with self.assertRaisesMessage(SimulationFailure, message):
            # An empty project signature contains no "badapp" application.
            mutation.run_simulation(app_label='badapp',
                                    project_sig=ProjectSignature(),
                                    database_state=None)
    def test_with_bad_model(self):
        """Testing ChangeField with model not in signature"""
        mutation = ChangeField('TestModel', 'char_field1')
        # The app is registered in the signature but holds no models.
        project_sig = ProjectSignature()
        project_sig.add_app_sig(AppSignature(app_id='tests'))
        message = (
            'Cannot change the field "char_field1" on model '
            '"tests.TestModel". The model could not be found in the '
            'signature.'
        )
        with self.assertRaisesMessage(SimulationFailure, message):
            mutation.run_simulation(app_label='tests',
                                    project_sig=project_sig,
                                    database_state=None)
    def test_with_bad_field(self):
        """Testing ChangeField with field not in signature"""
        mutation = ChangeField('TestModel', 'char_field1')
        # Register the model, but give it no field signatures.
        model_sig = ModelSignature(model_name='TestModel',
                                   table_name='tests_testmodel')
        app_sig = AppSignature(app_id='tests')
        app_sig.add_model_sig(model_sig)
        project_sig = ProjectSignature()
        project_sig.add_app_sig(app_sig)
        message = (
            'Cannot change the field "char_field1" on model '
            '"tests.TestModel". The field could not be found in the '
            'signature.'
        )
        with self.assertRaisesMessage(SimulationFailure, message):
            mutation.run_simulation(app_label='tests',
                                    project_sig=project_sig,
                                    database_state=None)
    def test_set_null_false_without_initial_value_raises_exception(self):
        """Testing ChangeField with setting null=False without initial value"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            # Changed from null=True in ChangeBaseModel.
            char_field1 = models.CharField(max_length=25, null=False)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        message = (
            'Cannot change the field "char_field1" on model '
            '"tests.TestModel". A non-null initial value needs to be '
            'specified in the mutation.'
        )
        # The mutation omits initial=, so the simulation must fail.
        with self.assertRaisesMessage(SimulationFailure, message):
            self.perform_evolution_tests(
                DestModel,
                [
                    ChangeField('TestModel', 'char_field1', null=False),
                ],
                ("In model tests.TestModel:\n"
                 " In field 'char_field1':\n"
                 " Property 'null' has changed"),
                [
                    "ChangeField('TestModel', 'char_field1',"
                    " initial=<<USER VALUE REQUIRED>>, null=False)",
                ])
    def test_set_null_false_and_null_initial_value_raises_exception(self):
        """Testing ChangeField with setting null=False and null initial
        value
        """
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            # Changed from null=True in ChangeBaseModel.
            char_field1 = models.CharField(max_length=25, null=False)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        message = (
            'Cannot change the field "char_field1" on model '
            '"tests.TestModel". A non-null initial value needs to be '
            'specified in the mutation.'
        )
        # An explicit initial=None is as invalid as no initial at all.
        with self.assertRaisesMessage(SimulationFailure, message):
            self.perform_evolution_tests(
                DestModel,
                [
                    ChangeField('TestModel', 'char_field1', null=False,
                                initial=None),
                ],
                ("In model tests.TestModel:\n"
                 " In field 'char_field1':\n"
                 " Property 'null' has changed"),
                [
                    "ChangeField('TestModel', 'char_field1',"
                    " initial=<<USER VALUE REQUIRED>>, null=False)",
                ])
    def test_set_null_false_and_initial_value(self):
        """Testing ChangeField with setting null=False and initial value"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            # Changed from null=True in ChangeBaseModel.
            char_field1 = models.CharField(max_length=25, null=False)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        # A constant initial value (with an embedded quote to exercise
        # escaping) makes the null=False change valid.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'char_field1', null=False,
                            initial="abc's xyz"),
            ],
            ("In model tests.TestModel:\n"
             " In field 'char_field1':\n"
             " Property 'null' has changed"),
            [
                "ChangeField('TestModel', 'char_field1',"
                " initial=<<USER VALUE REQUIRED>>, null=False)",
            ],
            'SetNotNullChangeModelWithConstant')
    def test_set_null_false_and_initial_callable(self):
        """Testing ChangeField with setting null=False and initial callable"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            # Changed from null=True in ChangeBaseModel.
            char_field1 = models.CharField(max_length=25, null=False)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        # The initial value may also be a callable, resolved at apply time.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField(
                    'TestModel', 'char_field1', null=False,
                    initial=ChangeSequenceFieldInitial(
                        'SetNotNullChangeModel')),
            ],
            ("In model tests.TestModel:\n"
             " In field 'char_field1':\n"
             " Property 'null' has changed"),
            [
                "ChangeField('TestModel', 'char_field1',"
                " initial=<<USER VALUE REQUIRED>>, null=False)",
            ],
            'SetNotNullChangeModelWithCallable')
    def test_set_null_true(self):
        """Testing ChangeField with setting null=True"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            # Changed from null=False in ChangeBaseModel.
            char_field2 = models.CharField(max_length=30, null=True)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        # Relaxing to null=True needs no meaningful initial value.
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'char_field2', initial=None,
                            null=True),
            ],
            ("In model tests.TestModel:\n"
             " In field 'char_field2':\n"
             " Property 'null' has changed"),
            [
                "ChangeField('TestModel', 'char_field2',"
                " initial=None, null=True)",
            ],
            'SetNullChangeModel')
    def test_set_null_true_when_true_noop(self):
        """Testing ChangeField with setting null=True when already True
        is noop
        """
        class DestModel(models.Model):
            # Identical to ChangeBaseModel: no schema change expected.
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'char_field1', null=True),
            ],
            # No diff expected (None), and the evolution is a no-op.
            None,
            [
                "ChangeField('TestModel', 'char_field1', null=True)",
            ],
            'NoOpChangeModel',
            expect_noop=True)
    def test_increase_max_length(self):
        """Testing ChangeField with increasing max_length of CharField"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            # Changed from max_length=20 in ChangeBaseModel.
            char_field = models.CharField(max_length=45)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'char_field', initial=None,
                            max_length=45),
            ],
            ("In model tests.TestModel:\n"
             " In field 'char_field':\n"
             " Property 'max_length' has changed"),
            [
                "ChangeField('TestModel', 'char_field',"
                " initial=None, max_length=45)",
            ],
            'IncreasingMaxLengthChangeModel')
    def test_decrease_max_length(self):
        """Testing ChangeField with decreasing max_length of CharField"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            # Changed from max_length=20 in ChangeBaseModel.
            char_field = models.CharField(max_length=1)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'char_field', initial=None,
                            max_length=1),
            ],
            ("In model tests.TestModel:\n"
             " In field 'char_field':\n"
             " Property 'max_length' has changed"),
            [
                "ChangeField('TestModel', 'char_field',"
                " initial=None, max_length=1)",
            ],
            'DecreasingMaxLengthChangeModel')
    def test_change_db_column(self):
        """Testing ChangeField with setting db_column"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            # Changed from db_column='custom_db_column' in ChangeBaseModel.
            int_field = models.IntegerField(db_column='customised_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'int_field', initial=None,
                            db_column='customised_db_column'),
            ],
            ("In model tests.TestModel:\n"
             " In field 'int_field':\n"
             " Property 'db_column' has changed"),
            [
                "ChangeField('TestModel', 'int_field',"
                " db_column='customised_db_column', initial=None)",
            ],
            'DBColumnChangeModel')
    def test_change_m2m_db_table(self):
        """Testing ChangeField with setting db_table on ManyToManyField"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            int_field2 = models.IntegerField(db_index=False)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            # Changed M2M db_table from ChangeBaseModel's value.
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='custom_m2m_db_table_name')
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'm2m_field1', initial=None,
                            db_table='custom_m2m_db_table_name'),
            ],
            ("In model tests.TestModel:\n"
             " In field 'm2m_field1':\n"
             " Property 'db_table' has changed"),
            [
                "ChangeField('TestModel', 'm2m_field1',"
                " db_table='custom_m2m_db_table_name', initial=None)",
            ],
            'M2MDBTableChangeModel')
    def test_set_db_index_true(self):
        """Testing ChangeField with setting db_index=True"""
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            # Changed from db_index=False in ChangeBaseModel.
            int_field2 = models.IntegerField(db_index=True)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        # No index exists before the evolution runs.
        self.assertIsNone(self.database_state.find_index(
            table_name='tests_testmodel',
            columns=['int_field2']))
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'int_field2', initial=None,
                            db_index=True),
            ],
            ("In model tests.TestModel:\n"
             " In field 'int_field2':\n"
             " Property 'db_index' has changed"),
            [
                "ChangeField('TestModel', 'int_field2', db_index=True,"
                " initial=None)",
            ],
            'AddDBIndexChangeModel')
        # The evolution must have created the index.
        self.assertIsNotNone(self.test_database_state.find_index(
            table_name='tests_testmodel',
            columns=['int_field2']))
    def test_set_db_index_true_and_existing_index(self):
        """Testing ChangeField with setting db_index=True and existing index
        in the database
        """
        class DestModel(models.Model):
            my_id = models.AutoField(primary_key=True)
            alt_pk = models.IntegerField()
            int_field = models.IntegerField(db_column='custom_db_column')
            int_field1 = models.IntegerField(db_index=True)
            # Changed from db_index=False in ChangeBaseModel.
            int_field2 = models.IntegerField(db_index=True)
            int_field3 = models.IntegerField(unique=True)
            int_field4 = models.IntegerField(unique=False)
            char_field = models.CharField(max_length=20)
            char_field1 = models.CharField(max_length=25, null=True)
            char_field2 = models.CharField(max_length=30, null=False)
            m2m_field1 = models.ManyToManyField(
                ChangeAnchor1, db_table='change_field_non-default_m2m_table')
        # Pre-register the index in the tracked database state, so the
        # evolution sees it as already present and skips creating it.
        evolver = EvolutionOperationsMulti('default',
                                           self.database_state).get_evolver()
        index_name = evolver.get_default_index_name(
            'tests_testmodel', DestModel._meta.get_field('int_field2'))
        self.database_state.add_index(table_name='tests_testmodel',
                                      index_name=index_name,
                                      columns=['int_field2'],
                                      unique=False)
        self.assertIsNotNone(self.database_state.find_index(
            table_name='tests_testmodel',
            columns=['int_field2']))
        self.perform_evolution_tests(
            DestModel,
            [
                ChangeField('TestModel', 'int_field2', initial=None,
                            db_index=True),
            ],
            ("In model tests.TestModel:\n"
             " In field 'int_field2':\n"
             " Property 'db_index' has changed"),
            [
                "ChangeField('TestModel', 'int_field2', db_index=True,"
                " initial=None)",
            ],
            'AddDBIndexNoOpChangeModel',
            rescan_indexes=False)
        self.assertIsNotNone(self.test_database_state.find_index(
            table_name='tests_testmodel',
            columns=['int_field2']))
def test_set_db_index_false(self):
"""Testing ChangeField with setting db_index=False"""
class DestModel(models.Model):
my_id = models.AutoField(primary_key=True)
alt_pk = models.IntegerField()
int_field = models.IntegerField(db_column='custom_db_column')
int_field1 = models.IntegerField(db_index=False)
int_field2 = models.IntegerField(db_index=False)
int_field3 = models.IntegerField(unique=True)
int_field4 = models.IntegerField(unique=False)
char_field = models.CharField(max_length=20)
char_field1 = models.CharField(max_length=25, null=True)
char_field2 = models.CharField(max_length=30, | |
user = cred_data[1]
pwd = cred_data[2]
lhost = get_local_ip(get_iface())
cmd, output, err = await run_smb_login(client, c_id, lhost, threads, user, pwd, dom, target_ips)
await parse_smb_login(lock, c_id, output, domain_data)
async def make_session_busy(sess_num, sess_data):
    """
    Wait until the session is free, then claim it.

    Async-blocks while another task holds the session (b'busy' == b'True'),
    then marks the session busy so concurrent tasks don't interleave
    meterpreter commands on it.
    """
    while sess_data[sess_num][b'busy'] == b'True':
        await asyncio.sleep(1)
    # BUGFIX: was '==' (a no-op comparison), so the session was never
    # actually marked busy.
    sess_data[sess_num][b'busy'] = b'True'
def make_session_not_busy(sess_num, sess_data):
    """Release the session by clearing its b'busy' flag."""
    # BUGFIX: was '==' (a no-op comparison), so the session was never
    # actually released.
    sess_data[sess_num][b'busy'] = b'False'
async def check_for_DA(lock, client, creds, sess_num, domain_data):
    """
    Check whether a harvested credential matches a known domain admin and,
    if a DC is known, verify the credential against it.

    creds is either a secretsdump-style hash line ('user:rid:lm:ntlm:::')
    or a plaintext 'DOM\\user:password' string.
    """
    dom_user = creds.split(':', 1)[0]
    DAs = []
    cred_data = None
    # Get a copy of domain_data['domain_admins']
    with await lock:
        for x in domain_data['domain_admins']:
            DAs.append(x)

    # Got a hash
    if creds.count(':') > 5 and creds.endswith(':::'):
        hash_split = creds.split(':')
        user = hash_split[0]
        rid = hash_split[1]
        lm = hash_split[2]
        ntlm = hash_split[3]

        # In case there's a DA like LAB\Administrator we don't want to trigger
        # on all RID 500 Admin usernames
        if user.lower() == 'administrator':
            return

        if len(DAs) > 0:
            for c in DAs:
                # DA entries look like 'DOMAIN\\user'; hashdump lines carry no
                # domain, so compare usernames only.
                da_user = c.split('\\')[1]
                if user.lower() == da_user.lower():
                    dom = '.'
                    lm_ntlm = lm+':'+ntlm
                    cred_data = (dom, user, lm_ntlm)
                    break

    # plaintext
    else:
        split_creds = creds.split(':', 1)
        dom_user = split_creds[0]
        pwd = split_creds[1]
        dom_user_split = dom_user.split('\\', 1)
        dom = dom_user_split[0]
        user = dom_user_split[1]
        # Plaintext creds keep their DOM\\user prefix, so compare the fully
        # qualified name against the DA list.
        for c in DAs:
            if dom_user.lower() == c.lower():
                cred_data = (dom, user, pwd)
                break

    if cred_data:
        msg = 'Potential domain admin found! '+creds
        print_good(msg, 'Session', sess_num)
        if len(domain_data['domain_controllers']) > 0:
            # This will run smb_login and parse_smb_login will tell us if its DA
            await check_creds_against_DC(lock, client, sess_num, cred_data, domain_data)
async def get_passwords(lock, client, sess_num, sess_data, domain_data):
    """Harvest credentials from a session: mimikatz first, then hashdump."""
    for dumper in (run_mimikatz, run_hashdump):
        await dumper(lock, client, sess_num, sess_data, domain_data)
    # TODO: mimikittenz support not yet implemented
async def run_hashdump(lock, client, sess_num, sess_data, domain_data):
    """
    Run meterpreter's 'hashdump' in the session and record each new hash
    line in domain_data['creds'].
    """
    cmd = 'hashdump'
    # No terminator string to watch for; run_session_cmd relies on its own
    # completion/timeout handling.
    end_strs = None
    # Claim the session so concurrent tasks don't interleave commands.
    await make_session_busy(sess_num, sess_data)
    output, err = await run_session_cmd(client, sess_num, sess_data, cmd, end_strs)
    make_session_not_busy(sess_num, sess_data)
    if err:
        return

    for l in output.splitlines():
        l = l.strip().decode('utf8')
        # Record only hashes we haven't seen before.
        if l not in domain_data['creds']:
            domain_data['creds'].append(l)
            msg = 'Hashdump creds - '+l
            print_good(msg, 'Session', sess_num)
            #await check_for_DA(lock, client, l, sess_num, domain_data)
def get_console_ids(client):
    """Ensure at least five Metasploit consoles exist and return their ids."""
    print_info('Opening Metasploit consoles', None, None)
    console_ids = [c[b'id'] for c in client.call('console.list')[b'consoles']]
    while len(console_ids) < 5:
        client.call('console.create')
        console_ids = [c[b'id'] for c in client.call('console.list')[b'consoles']]
        # Give msfrpcd a moment to register the new console.
        time.sleep(2)
    # Drain each console's banner/output buffer so later reads start clean.
    for console_id in console_ids:
        client.call('console.read', [console_id])[b'data'].decode('utf8').splitlines()
    return console_ids
async def run_msf_module(client, c_id, mod, rhost_var, target_ips, lhost, extra_opts, start_cmd, end_strs):
    """
    Run one MSF module in console c_id and return (cmd, output, err).

    Always stages the standard x64 HTTPS meterpreter payload.
    """
    payload = 'windows/x64/meterpreter/reverse_https'
    full_cmd = create_msf_cmd(mod, rhost_var, target_ips, lhost, payload, extra_opts, start_cmd)
    console_output, console_err = await run_console_cmd(client, c_id, full_cmd, end_strs)
    return (full_cmd, console_output, console_err)
def create_msf_cmd(module_path, rhost_var, target_ips, lhost, payload, extra_opts, start_cmd):
    """
    Build the newline-terminated msfconsole command string that selects a
    module, sets its options, and launches it.
    """
    lines = [
        'use {}'.format(module_path),
        'set {} {}'.format(rhost_var, target_ips),
        'set LHOST {}'.format(lhost),
        'set payload {}'.format(payload),
        extra_opts,
        start_cmd,
    ]
    # Trailing newline so the final command is actually submitted.
    return '\n'.join(lines) + '\n'
async def run_console_cmd(client, c_id, cmd, end_strs):
    '''
    Runs module and gets output

    Writes the multi-line command string to the console, collects output
    until one of end_strs appears (or the poll times out), and returns
    (output, err) where err is a parsed error string or None.
    '''
    err = None
    cmd_split = cmd.splitlines()
    # First line is 'use <module path>'; pull the module path for logging.
    module = cmd_split[0].split()[1]
    print_info('Running MSF module [{}]'.format(module), 'Console', c_id)
    client.call('console.write',[c_id, cmd])
    output = await get_console_output(client, c_id, end_strs)
    err = get_output_errors(output, cmd)
    if err:
        print_bad(err, 'Console', c_id)
    return (output, err)
async def get_console_output(client, c_id, end_strs, timeout=60):
    '''
    The only way to get console busy status is through console.read or console.list
    console.read clears the output buffer so you gotta use console.list
    but console.list requires you know the list offset of the c_id console
    so this ridiculous list comprehension seems necessary to avoid assuming
    what the right list offset might be

    Polls the console until it goes non-busy, then keeps reading until one
    of end_strs appears in the accumulated output or timeout (seconds of
    polling) elapses.  Returns the raw bytes read.
    '''
    counter = 0
    sleep_secs = 1
    # BUGFIX: compare ids with '==', not 'is' — identity comparison only
    # works by accident for interned values.
    list_offset = int([x[b'id'] for x in client.call('console.list')[b'consoles'] if x[b'id'] == c_id][0])
    output = b''
    # Give it a chance to start
    await asyncio.sleep(sleep_secs)
    # Get any initial output
    output += client.call('console.read', [c_id])[b'data']
    # NOTE(review): this busy-wait loop has no timeout check — a console
    # stuck busy will block forever; confirm that's acceptable.
    while client.call('console.list')[b'consoles'][list_offset][b'busy'] == True:
        output += client.call('console.read', [c_id])[b'data']
        await asyncio.sleep(sleep_secs)
        counter += sleep_secs
    while True:
        output += client.call('console.read', [c_id])[b'data']
        if end_strs:
            # BUGFIX: loop variable previously shadowed the end_strs
            # parameter inside the generator expression.
            if any(end_str in output for end_str in end_strs):
                break
        if counter > timeout:
            break
        await asyncio.sleep(sleep_secs)
        counter += sleep_secs
    # Get remaining output
    output += client.call('console.read', [c_id])[b'data']
    debug_info(output, 'Console', c_id)
    return output
async def get_nonbusy_cid(client, c_ids):
while True:
for c_id in c_ids:
list_offset = int([x[b'id'] for x in client.call('console.list')[b'consoles'] if x[b'id'] is c_id][0])
if client.call('console.list')[b'consoles'][list_offset][b'busy'] == False:
return c_id
await asyncio.sleep(1)
def plaintext_or_hash(creds):
    """
    Classify a credential string: 'hash' for a secretsdump-style line
    ('user:rid:lm:ntlm:::'), otherwise 'plaintext'.
    """
    looks_like_hash = creds.count(':') == 6 and creds.endswith(':::')
    return 'hash' if looks_like_hash else 'plaintext'
def parse_creds(creds):
    """
    Parse a credential string into (dom, user, pwd, rid).

    Hash form ('user:rid:lm:ntlm:::'): pwd is 'lm:ntlm' (the pair
    Metasploit needs for pass-the-hash) and dom is '.' (WORKGROUP /
    non-domain login in msf).
    Plaintext form ('[DOM\\]user:password'): rid is None; dom is the
    'DOM' prefix when present, else '.'.
    """
    cred_type = plaintext_or_hash(creds)
    if cred_type == 'hash':
        hash_split = creds.split(':')
        user = hash_split[0]
        rid = hash_split[1]
        lm = hash_split[2]
        ntlm = hash_split[3]  # ntlm hash
        pwd = lm+':'+ntlm  # need lm:ntlm for PTH in metasploit
        dom = '.'  # this also means WORKGROUP or non-domain login in msf
    elif cred_type == 'plaintext':
        # BUGFIX: split only on the first ':' so passwords containing ':'
        # survive intact (matches check_for_DA's handling).
        cred_split = creds.split(':', 1)
        user = cred_split[0]
        pwd = cred_split[1]
        rid = None
        # BUGFIX: dom was previously unbound when no 'DOM\' prefix was
        # present, raising UnboundLocalError at the return.
        dom = '.'
        # Remove domain from user
        if "\\" in user:
            user_split = user.split("\\")
            user = user_split[1]
            dom = user_split[0]
    return dom, user, pwd, rid
async def spread(lock, client, c_ids, lhost, sess_data, domain_data):
    """
    Main lateral-movement loop: forever, spray every not-yet-checked
    credential across known hosts with smb_login, then open new shells
    wherever admin access was gained.
    """
    while True:
        # Copy the dict so we can loop it safely
        # NOTE(review): .copy() is shallow — the 'checked_creds' sub-dict is
        # shared, so mutations below also land in domain_data; appears
        # intentional but worth confirming.
        dom_data_copy = domain_data.copy()
        for c in dom_data_copy['creds']:
            if c not in dom_data_copy['checked_creds']:
                # Set up a dict where the key is the creds and the val are the hosts we are admin on
                dom_data_copy['checked_creds'][c] = []
                await run_smb_brute(lock, client, c_ids, lhost, c, domain_data, dom_data_copy)
        await get_new_shells(lock, client, c_ids, lhost, sess_data, domain_data, dom_data_copy)
        await asyncio.sleep(1)
async def run_smb_login(client, c_id, lhost, threads, user, pwd, dom, target_ips):
    """
    Launch auxiliary/scanner/smb/smb_login against target_ips with a single
    credential pair and return (cmd, output, err).
    """
    module = 'auxiliary/scanner/smb/smb_login'
    rhost_var = 'RHOSTS'
    start_cmd = 'run'
    end_strs = [b'Auxiliary module execution completed']
    extra_opts = ('set threads {}\n'
                  'set smbuser {}\n'
                  'set smbpass {}\n'
                  'set smbdomain {}'.format(threads, user, pwd, dom))
    # 'file:...' RHOSTS means we're spraying a whole host list.
    if 'file:' in target_ips:
        msg = 'Spraying credentials [{}:{}] against hosts'.format(user, pwd)
    else:
        msg = 'Trying credentials [{}:{}] against {}'.format(user, pwd, target_ips)
    print_info(msg, 'Console', c_id)
    cmd, output, err = await run_msf_module(client, c_id, module, rhost_var, target_ips, lhost, extra_opts, start_cmd, end_strs)
    return (cmd, output, err)
async def run_smb_brute(lock, client, c_ids, lhost, creds, domain_data, dom_data_copy):
    """
    Spray one credential (plaintext, or a RID-500 hash) across the known
    hosts via smb_login and parse the module output.
    """
    cred_type = plaintext_or_hash(creds)
    dom, user, pwd, rid = parse_creds(creds)
    threads = '32'
    # Just smb brute with rid 500 for now
    if cred_type == 'hash' and rid != '500':
        return
    # Write the current host list to a file so msf can take RHOSTS 'file:...'
    filename = 'unchecked_hosts'
    domain_data_key = 'hosts'
    target_ips = create_hostsfile(dom_data_copy, filename, domain_data_key)
    c_id = await get_nonbusy_cid(client, c_ids)
    cmd, output, err = await run_smb_login(client, c_id, lhost, threads, user, pwd, dom, target_ips)
    await parse_module_output(lock, c_id, err, cmd, output, domain_data)
async def get_admin_session_data(lock, sess_data, domain_data):
    """
    Collect per-IP admin_shell flags from all live sessions so spread()
    doesn't open duplicate shells, and clear IPs from pending_shell_ips
    once an admin shell on them has landed.

    Returns {ip_bytes: [admin_shell flag per session on that IP]}.
    """
    # Get all session IPs and figure out if they're admin shells so we don't overlap our spread
    admin_sess_data = {}
    with await lock:
        for sess_num in sess_data:
            ip = sess_data[sess_num][b'tunnel_peer'].split(b':')[0]
            utf8_ip = ip.decode('utf8')
            if b'admin_shell' not in sess_data[sess_num]:
                continue
            admin_shell = sess_data[sess_num][b'admin_shell']
            # In case we have multiple shells on the same IP, we must collect
            # all their admin_shell properties to check later if we have any
            # admin shells on that IP.
            # BUGFIX: previously the list was re-initialized for every
            # session, so only the last session's flag per IP survived.
            admin_sess_data.setdefault(ip, []).append(admin_shell)
            # Remove IP from pending_shell_ips which exists so spread() doesn't
            # spread to an IP that's waiting for psexec_psh to finish
            if admin_shell == b'True':
                if utf8_ip in domain_data['pending_shell_ips']:
                    domain_data['pending_shell_ips'].remove(utf8_ip)
    return admin_sess_data
async def get_new_shells(lock, client, c_ids, lhost, sess_data, domain_data, dom_data_copy):
    """
    For every (creds, admin-IP) pair we know about, open a new meterpreter
    session with psexec_psh unless we already have — or are waiting on —
    an admin shell on that IP.
    """
    admin_session_data = await get_admin_session_data(lock, sess_data, domain_data)
    c_id = await get_nonbusy_cid(client, c_ids)
    # run psexec_psh on all ips that we either don't have a shell on already or don't have an admin shell on
    # dom_data_copy['checked_creds']['LAB\\dan:P@ssw0rd'] = [list of ips we have admin for those creds]
    for creds in dom_data_copy['checked_creds']:
        for admin_ip in dom_data_copy['checked_creds'][creds]:
            bytes_admin_ip = admin_ip.encode()
            # Shells take a minute to open so we don't want to double up on shells while they open
            if admin_ip not in domain_data['pending_shell_ips']:
                # Check if the IP we have admin on already has a session
                if bytes_admin_ip in admin_session_data:
                    # If we have a shell on it but we're not admin, then continue get admin shell
                    # admin_shell_vals = [b'True', b'False', b'True'] depending on how many shells we have on that IP
                    # Making design decision here to not check if the session is broken or not because it's too easy
                    # for that to lead to infinite loops of spreading with broken sess after broken sess
                    admin_shell_vals = [x for x in admin_session_data[bytes_admin_ip]]
                    if b'True' in admin_shell_vals:
                        continue
                # Either we don't have this IP in our session, or there's no admin session open on it
                await run_psexec_psh(lock, client, c_id, creds, admin_ip, lhost, domain_data)
# | |
<filename>BDMLtools/selector/bin_fun.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 27 23:08:38 2021
@author: zengke
"""
import pandas as pd
import numpy as np
from pandas.api.types import is_numeric_dtype,is_string_dtype,is_array_like
from sklearn.base import BaseEstimator
from joblib import Parallel,delayed,effective_n_jobs
import warnings
from itertools import groupby
from sklearn.cluster import KMeans
from scipy.stats import chi2,chi2_contingency
from warnings import warn
from BDMLtools.fun import raw_to_bin_sc,Specials
from BDMLtools.base import Base
from BDMLtools.report.report import varReportSinge
def R_pretty(low, high, n):
    '''
    pretty breakpoints, the same as pretty function in R

    Params
    ------
    low: minimal value
    high: maximal value
    n: number of intervals

    Returns
    ------
    numpy.ndarray
        returns a breakpoints array
    '''
    def _nice(v):
        # Round v to a "nice" number: 1, 2, 5 or 10 times a power of ten.
        exp = np.floor(np.log10(abs(v)))
        frac = abs(v) / 10**exp
        for limit, mult in ((1.5, 1.), (3., 2.), (7., 5.)):
            if frac < limit:
                step = mult
                break
        else:
            step = 10.
        return np.sign(v) * step * 10.**exp

    # Nice cell width, then expand [low, high] to multiples of it.
    cell = abs(_nice((high - low) / (n - 1)))
    start = np.floor(low / cell) * cell
    stop = np.ceil(high / cell) * cell
    # Half-cell slack on the stop so the last breakpoint is included.
    return np.arange(start, stop + 0.5 * cell, cell)
def remove_outlier(col):
    '''
    remove outlier using iqr

    Values outside 3*IQR fences around the 25th/75th percentiles are
    dropped; when the IQR is zero the 1st/99th percentiles are used as
    fences instead.  If filtering would leave a single unique value, the
    original array is returned unchanged.

    Params
    ------
    col: numpy.ndarray

    Returns
    ------
    numpy.ndarray
        returns col with outliers removed
    '''
    p1, p25, p75, p99 = np.nanpercentile(col, [1, 25, 75, 99])
    spread = p75 - p25
    if spread == 0:
        keep = (col >= p1) & (col <= p99)
    else:
        keep = (col >= p25 - 3 * spread) & (col <= p75 + 3 * spread)
    filtered = col[keep]
    # Degenerate result: fall back to the unfiltered column.
    return col if np.unique(filtered).size == 1 else filtered
def is_monotonic(col):
    """True when col is strictly increasing or strictly decreasing."""
    steps = np.diff(col)
    return np.all(steps > 0) or np.all(steps < 0)
def check_monotonirc(col,y,cuts,ws=None):
    """
    Greedily keep only the cut points under which the per-bin bad rate
    stays strictly monotonic in col.

    Candidates are tested one at a time, in the order given, against the
    points already accepted; a candidate is kept only if the (weighted)
    bad-rate sequence over the resulting bins is monotonic.

    Params
    ------
    col: numpy.ndarray, feature values
    y: numpy.ndarray, binary target (1 = bad)
    cuts: iterable of candidate cut points
    ws: optional sample weights, same length as y

    Returns
    ------
    numpy.ndarray of the retained cut points
    """
    if is_array_like(ws):
        if ws.size!=y.size:
            raise ValueError('length of weight not equal to y')
        # Weighted bad events; 'count' becomes the weighted total per bin.
        y = y * ws
        count = ws
    else:
        count=np.ones(col.size)

    cuts_remain=[]
    for point in cuts:
        # Bin col by the already-accepted points plus this candidate.
        col_group=np.digitize(col,sorted([-np.inf]+[point]+cuts_remain+[np.inf]))
        g=np.zeros(0)
        b=np.zeros(0)
        count_g=np.zeros(0)
        for i in np.unique(col_group):
            y_g=y[col_group==i]
            unit_g=count[col_group==i]
            count_g=np.append(count_g,np.sum(unit_g))
            b=np.append(b,np.sum(y_g))
            g=np.append(g,np.sum(unit_g) - np.sum(y_g))
        bad_prob=b/count_g
        # Keep the candidate only if the bad rate remains monotonic.
        if is_monotonic(bad_prob):
            cuts_remain.append(point)
    return np.array(cuts_remain)
def rm_edge_point(col,cut_off):
    """
    Drop breakpoints at or beyond the data range, then greedily drop any
    breakpoint whose addition would create an empty bin.
    """
    inner = cut_off[(cut_off > col.min()) & (cut_off < col.max())]
    kept = []
    for candidate in inner:
        edges = sorted([-np.inf] + [candidate] + kept + [np.inf])
        bin_idx = np.digitize(col, edges, right=False)
        # Keep the candidate only if every bin index 1..max is populated.
        expected = np.arange(start=1, stop=bin_idx.max() + 1)
        if np.isin(expected, np.unique(bin_idx)).all():
            kept.append(candidate)
    return np.array(kept)
def binFreq(X,y,bin_num_limit=10,special_values=None,ws=None,coerce_monotonic=False):
    """
    Equal-frequency binning for every column of X.

    Parameters:
    --
    X: pandas.DataFrame of numeric and/or string columns
    y: pandas.Series binary target
    bin_num_limit: maximum number of bins per column
    special_values: special-value definition forwarded to Specials
    ws: optional sample weights
    coerce_monotonic: drop cut points that break bad-rate monotonicity

    Returns
    ------
    (breaks_list, bins): per-column breakpoints and per-column
    varReportSinge reports.
    """
    def get_breaks(col,y,bin_num_limit=bin_num_limit,ws=ws,special_values=special_values,coerce_monotonic=coerce_monotonic):
        # Replace declared special values (numeric -> NaN, string -> 'special').
        col=Specials()._sp_replace_single(col,Specials()._check_spvalues(col.name,special_values),fill_num=np.nan,fill_str='special')
        if col.isnull().all():
            warn('nan column:{},return blank breaks'.format(col.name))
            breaks=[]
        elif np.max(col) == np.min(col):
            warn('constant column:{},return blank breaks'.format(col.name))
            breaks=[]
        elif is_numeric_dtype(col):
            # Drop missing values from y (and weights) in step with col.
            y=y[~np.isnan(col)]
            if is_array_like(ws) and coerce_monotonic:
                ws=ws[~np.isnan(col)]
                if ws.size!=y.size:
                    raise ValueError('length of weight not equal to y')
            else:
                ws=np.ones(y.size)
            col=col[~np.isnan(col)]
            # Cap the bin count by the number of distinct values.
            if np.unique(col).size<bin_num_limit:
                n_bins_adj=np.unique(col).size
            else:
                n_bins_adj=bin_num_limit
            # Interior quantiles only (drop the 0% and 100% edges).
            breaks=np.percentile(col,np.arange(n_bins_adj+1)/n_bins_adj*100)[1:-1]
            #adjust bin for extreamly unbalanced count distr
            if ws[col<np.min(breaks)].sum()/ws.sum()<=min(1/n_bins_adj,0.01):
                breaks=breaks[breaks!=np.min(breaks)]
            breaks=np.unique(breaks).tolist()
            if coerce_monotonic:
                # Drop cut points under which the bad rate is not monotonic.
                breaks=check_monotonirc(col,y,breaks,ws=ws).tolist()
        elif is_string_dtype(col):
            # String columns: every level is its own bin.
            breaks=col.unique().tolist()
        else:
            raise ValueError('dtype in only number and object')
        return breaks

    # NOTE(review): DataFrame.iteritems was removed in pandas 2.0; this
    # requires pandas < 2.0 (or a switch to .items()).
    breaks_list={name_value[0]:get_breaks(name_value[1],y)
                 for name_value in X.iteritems()}
    bins={col:varReportSinge().report(X[col],y,breaks_list[col],ws,special_values) for col in X.columns}
    return breaks_list,bins
class binKmeans(Base,Specials,BaseEstimator):
    """
    KMeans-based bin merging: an automatic, non-optimizing bin-adjustment
    algorithm.

    After fine binning, bins with similar bad rates usually need to be
    merged; this class automates that process.  'missing' and 'special'
    bins are never merged.

    Params:
    ------
    breaks_list: dict of {var_name:[breaks],...}; supports scorecardpy-
        and toad-style breaks_list structures
    combine_ratio: float, merge threshold; with bin_limit=1, the larger
        the value the more bins get merged
        + suggested range 0.01-0.1 for numeric features
        + for categorical features with very many levels, increase it
          (around 0.5 suggested)
    bin_limit: int, lower limit on the number of final bins; the lower it
        is the more bins get merged (4-6 suggested).  Columns whose
        initial bin count is already below bin_limit are left unmerged
    seed: int, KMeans random seed
        + when merging bins whose bad rates differ a lot, KMeans
          randomness is amplified and results may not reproduce; fix seed
          to make merges reproducible
        + sensible combine_ratio and bin_limit values greatly reduce the
          KMeans randomness
    sample_weight: array, sample weights
    special_values: None, list or dict; values (other than np.nan) that
        need special treatment
        + None: no special values
        + list=[v1,v2,...]: replaced in all columns (strings ->
          'missing', numerics -> np.nan)
        + dict={col_name:[v1,...],...}: per-column replacement
    n_jobs: int, parallel jobs, default -1; much faster on wide/large
        data at the cost of extra memory
    verbose: int, joblib verbosity level

    Attributes:
    -------
    breaks_list: dict, resulting breaks per column
    bins: dict, per-feature report under the current breaks_list
    """

    def __init__(self,breaks_list,combine_ratio=0.1,bin_limit=5,seed=123,sample_weight=None,special_values=None,n_jobs=-1,verbose=0):
        self.combine_ratio = combine_ratio
        self.bin_limit=bin_limit
        self.breaks_list=breaks_list
        self.special_values=special_values
        self.sample_weight=sample_weight
        self.seed=seed
        self.n_jobs=n_jobs
        self.verbose=verbose

    def fit(self, X, y):
        """Merge bins for every column named in breaks_list; fills
        self.breaks_list and self.bins."""
        self._check_data(X, y)

        #breaks_list=self.get_Breaklist_sc(self.breaks_list,X,y)
        breaks_list=self.breaks_list

        n_jobs=effective_n_jobs(self.n_jobs)

        parallel=Parallel(n_jobs=n_jobs,verbose=self.verbose)

        # One parallel job per column.
        col_break=parallel(delayed(self._combine_badprob_kmeans)(X[col],y,
                                                                 self.combine_ratio,
                                                                 self.bin_limit,
                                                                 self.breaks_list[col],
                                                                 self.sample_weight,
                                                                 self.special_values,
                                                                 self.seed)
                           for col in list(breaks_list.keys()))

        self.breaks_list={col:breaks for col,breaks,_ in col_break}
        self.bins={col:vtab for col,_,vtab in col_break}

        return self

    def transform(self, X,y=None):
        """No-op transform; returns X unchanged."""
        return X

    def _combine_badprob_kmeans(self,col,y,combine_ratio,bin_limit,breaks,ws=None,special_values=None,random_state=123):
        """
        Merge one column's bins by clustering bin-level bad rates with
        KMeans until bin_limit (or an iteration cap) is reached.

        Returns (col_name, merged_breaks, vtab).
        """
        #global var_bin,res_km_s
        var_raw=self._sp_replace_single(col,self._check_spvalues(col.name,special_values),fill_num=np.nan,fill_str='special')

        if is_array_like(ws):
            ws=ws
            if ws.size!=y.size:
                raise ValueError('length of weight not equal to y')
        else:
            # Unweighted: every sample counts once.
            ws=pd.Series(np.ones(col.size),index=y.index)

        if is_string_dtype(var_raw):
            #fillna
            var_cut_fillna=pd.Series(np.where(var_raw.isnull(),'missing',var_raw),
                                     index=var_raw.index,
                                     name='bin')
            #map raw code to new code
            var_code_raw=var_cut_fillna.unique().tolist()
            map_codes=raw_to_bin_sc(var_code_raw,breaks)
            var_map=var_cut_fillna.map(map_codes)

            #initialize params
            combine_ratio_count=True
            n_clusters=len(breaks)
            iter_times=len(breaks)
            iters=0
            breaks=breaks

            #no merge if n_clusters<=bin_limit(user-defined)
            if n_clusters<bin_limit:
                vtab=varReportSinge().report(col,y,breaks=breaks,sample_weight=ws,special_values=special_values)
                return col.name,breaks,vtab
            else:
                while True:
                    # Weighted bad rate per current bin.
                    gp=pd.concat([var_map,y.mul(ws).rename(y.name),ws.rename('ws')],axis=1).groupby(var_map)
                    var_bin=gp[y.name].sum().div(gp['ws'].sum()).rename('badprob').reset_index()
                    var_bin['bin']=var_bin['bin'].astype('str')

                    #unique values for n_cluster adjustion
                    n_clusters_unique=var_bin['badprob'].unique().size

                    #combine_ratio_count>0: lower n_clusters
                    if combine_ratio_count:
                        #n_clusters should not greater than unique samples in data
                        if n_clusters_unique<n_clusters:
                            n_clusters=n_clusters_unique-1
                        else:
                            n_clusters=n_clusters-1
                    #update combine_ratio when combine_ratio_count=0 to make futher merge
                    else:
                        if combine_ratio==1:
                            combine_ratio=combine_ratio
                        else:
                            combine_ratio=combine_ratio+0.02

                    #n_clusters not 0
                    if n_clusters<=1:
                        n_clusters=1
                        # NOTE(review): this immediately overwrites the line
                        # above — looks suspicious; confirm intent.
                        n_clusters=var_bin['badprob'].unique().size-1

                    res_km=KMeans(n_clusters=n_clusters,random_state=random_state).fit_predict(var_bin[['badprob']])
                    res_km_s=pd.Series(res_km,var_bin.index,name='cluster')

                    #update string breaks
                    breaks=var_bin.groupby(res_km_s)['bin'].apply(lambda x : '%,%'.join(x)).tolist()

                    #combine_ratio calculation
                    var_bin_ratio=var_bin['badprob'].diff(1).abs().div(var_bin.badprob+1e-10)
                    combine_ratio_count=var_bin_ratio.lt(combine_ratio).sum()

                    #map old string codes to new
                    var_code_raw=var_map.unique().tolist()
                    mapcode=raw_to_bin_sc(var_code_raw,breaks)
                    var_map=var_map.map(mapcode)

                    iters=iters+1

                    #stop condition 1/2: n_clusters<=bin_limit
                    if len(breaks)<=bin_limit:
                        break

                    #stop condition 2/2:iters gt max iters
                    if iters>=iter_times:
                        break

                vtab=varReportSinge().report(col,y,breaks=breaks,sample_weight=ws,special_values=special_values)

                return col.name,breaks,vtab

        elif is_numeric_dtype(var_raw):
            #initialize params
            combine_ratio_count=True
            n_clusters=len(breaks)+1
            iter_times=col.unique().size
            iters=0
            breaks=breaks

            #no merge if n_clusters<=bin_limit(user-defined)
            if n_clusters<bin_limit:
                vtab=varReportSinge().report(col,y,breaks=breaks,sample_weight=ws,special_values=special_values)
                return col.name,breaks,vtab
            else:
                while True:
                    var_cut=pd.cut(var_raw,[-np.inf]+breaks+[np.inf],duplicates='drop',right=False)
                    # Label missing values with a sentinel bin.
                    var_cut_fillna=pd.Series(np.where(var_cut.isnull(),'missing',var_cut),
                                             index=var_cut.index,
                                             name='bin')
                    #get badprob
                    gp=pd.concat([var_cut_fillna,y.mul(ws).rename(y.name),ws.rename('ws')],axis=1).groupby(var_cut_fillna)
                    var_bin=gp[y.name].sum().div(gp['ws'].sum()).rename('badprob').reset_index()
                    #numeric missings excluded
                    var_bin=var_bin[var_bin['bin']!='missing']
                    var_bin['bin']=var_bin['bin'].astype('str')

                    #unique values for n_cluster adjustion
                    n_clusters_unique=var_bin['badprob'].unique().size

                    #combine_ratio_count>0: lower n_clusters
                    if combine_ratio_count:
                        #n_clusters should not greater than unique samples in data
                        if n_clusters_unique<n_clusters:
                            n_clusters=n_clusters_unique-1
                        else:
                            n_clusters=n_clusters-1
                    #stop condition 1/3:combine_ratio_count=0 then break
                    else:
                        if combine_ratio==1:
                            combine_ratio=combine_ratio
                        else:
                            combine_ratio=combine_ratio+0.02

                    #n_clusters not 0
                    if n_clusters<=1:
                        n_clusters=1

                    res_km=KMeans(n_clusters=n_clusters,random_state=random_state).fit_predict(var_bin[['badprob']])
                    res_km_s=pd.Series(res_km,var_bin.index,name='cluster')

                    #get index of bins to be merged
                    g_index_list=var_bin.groupby(res_km_s)['bin'].apply(lambda x : x.index.sort_values().tolist()).tolist()

                    #combine_ratio_count calculation
                    var_bin_ratio=var_bin['badprob'].diff(1).abs().div(var_bin.badprob+1e-10)
                    combine_ratio_count=var_bin_ratio.lt(combine_ratio).sum()

                    #remove points from orginal breaks
                    index_drop=self._getindex(g_index_list)
                    breaks=np.delete(breaks,index_drop).tolist()

                    iters=iters+1

                    #stop condition 1/2:bin_num<=bin_limit(user-defined)
                    if len(breaks)+1<=bin_limit:
                        #print('len(breaks)<=bin_limit')
                        break
                    #stop condition 2/2:iters gt max iters
                    if iters>=iter_times:
                        #print('iter_times')
                        break

                vtab=varReportSinge().report(col,y,breaks=breaks,sample_weight=ws,special_values=special_values)

                return col.name,breaks,vtab

        # Column dtype is neither numeric nor string.
        else:
            raise ValueError("col's dtype in (number,object).")

    def _getindex(self,g_index_list):
        """
        Find runs of consecutive index values inside each group list and
        return the first index of every run (the break positions to drop).
        """
        ll=[]
        for lst in g_index_list:
            # Constant (value - position) identifies a run of consecutive ints.
            fun = lambda x: x[1]-x[0]
            for k, g in groupby(enumerate(lst), fun):
                l1 = [j for i, j in g]
                if len(l1) > 1:
                    ll.append(min(l1))
        return ll
class binTree(Base,Specials,BaseEstimator):
"""
决策树递归最优分箱
分类特征处理方式:按照badrate对类进行排序并进行ordinal编码再进行分箱(与scorecardpy一致)
分类中不要出现字符空('' or "")类
Params:
------
max_bin=50,初始分箱数
+ 使用Pretty Breakpoints获取预分箱点,详见R的pretty函数
+ 移除异常值,移除边界点
+ 越多的初始分箱数能够得到越好的最优分箱点,但会增加计算量。max_bin=50时与scorecardpy一致
criteria='iv',决策树进行分割的指标,
+ 目前支持iv_gain与ks_gain,gain表示分割前后的指标增益,参数tol可控制分割停止的增益限制
+ 当适用样本权重ws时,算法会改为计算加权的iv_gain与ks_gain
max_iters=100,决策树递归次数
tol=1e-4,决策树进行分割的指标的增益小于tol时停止分割
+ tol越小分割越容易,越大分割越难
distr_limit=0.05,每一箱的样本占比限制
bin_num_limit=8,分箱总数限制
coerce_monotonic=False,是否强制bad_prob单调,默认否
+ 为True时,本算法中会在最优分割过程中加入单调性限制,强制每一个新的分割点都必须先使bad_rate单调
+ 若x与y本身有单调趋势则强制单调能够取得理想的结果,若x与y的关系是非线性关系则强制单调结果会不理想
ws=None,None or pandas.core.series.Series,样本权重
+ 样本权重影响分割的指标iv_gain与ks_gain的计算结果,进而影响最优分割点的选择
+ 若仅对好坏样本加权(权重非0)则iv_gain与ks_gain无变化,也不会影响最优分割点的选择
+ 若coerce_monotonic=True,样本权重影响bad_rate的计算进而会影响单调最优分箱的分割点的选择
+ 若仅对好坏样本加权(权重非0)则bad_rate的计算的排序性不变,也不会影响最优分割点的选择
special_values:特殊值指代值,若数据中某些值或某列某些值需特殊对待(这些值不是np.nan)时设定
+ None,保证数据默认
+ list=[value1,value2,...],数据中所有列的值在[value1,value2,...]中都会被替换,字符被替换为'missing',数值被替换为np.nan
+ dict={col_name1:[value1,value2,...],...},数据中指定列替换,被指定的列的值在[value1,value2,...]中都会被算法认定为'special'
n_jobs=-1,int,并行数量,默认-1,在数据量较大、列较多的前提下可极大提升效率但会增加内存占用
verbose=0,并行信息输出等级
Attributes:
-------
breaks_list:dict,产生的分箱dict
bins:dict,当前breaks_list下的特征分析报告
"""
def __init__(self,max_bin=50,criteria='iv',max_iters=100,
             tol=1e-4,distr_limit=0.05,bin_num_limit=8,coerce_monotonic=False,
             ws=None,special_values=None,n_jobs=-1,verbose=0):
    """Store hyperparameters; see the class docstring for their meaning."""
    self.max_bin=max_bin
    self.criteria=criteria
    self.max_iters=max_iters
    self.tol=tol
    self.distr_limit=distr_limit
    self.bin_num_limit=bin_num_limit
    self.ws=ws
    self.coerce_monotonic=coerce_monotonic
    self.special_values=special_values
    self.n_jobs=n_jobs
    self.verbose=verbose
def fit(self, X, y):
    """
    Compute decision-tree-based optimal breaks for every column of X,
    one parallel job per column.  Fills self.breaks_list and self.bins.
    """
    self._check_data(X, y)

    n_jobs=effective_n_jobs(self.n_jobs)

    p=Parallel(n_jobs=n_jobs,verbose=self.verbose)

    # NOTE(review): DataFrame.iteritems was removed in pandas 2.0; this
    # requires pandas < 2.0 (or a switch to .items()).
    res=p(delayed(self._get_treecut)(col[1],y,self.max_bin,
                                     self.criteria,self.max_iters,
                                     self.tol,self.distr_limit,
                                     self.bin_num_limit,
                                     self.ws,
                                     self.coerce_monotonic,
                                     self.special_values) for col in X.iteritems())

    self.breaks_list={col_name:breaks for col_name,breaks,_ in res}
    self.bins={col_name:vtab for col_name,_,vtab in res}

    return self
def transform(self, X,y=None):
    """No-op transform (the binning itself is applied elsewhere); returns X."""
    return X
def _get_treecut(self,col,y,max_bin,criteria,max_iters,tol,distr_limit,bin_num_limit,ws,coerce_monotonic,special_values):
    """
    Compute tree-based optimal breaks for a single column.

    Numeric columns go through _get_bestsplit directly; string columns are
    ordinal-encoded by bad rate first (scorecardpy-style) and the splits
    are mapped back to '%,%'-joined level groups.

    Returns (col_name, breaks, vtab) where vtab is the varReportSinge
    report for the chosen breaks.
    """
    col_raw=col.copy()
    col=self._sp_replace_single(col,self._check_spvalues(col.name,special_values),fill_num=np.nan,fill_str='special')

    #sample_wieght
    if is_array_like(ws):
        # BUGFIX: Series.values is a property, not a method — ws.values()
        # raised TypeError whenever weights were supplied.  Also accept a
        # raw ndarray.
        ws=ws.values if hasattr(ws,'values') else np.asarray(ws)
    else:
        ws=None

    #numeric column
    if is_numeric_dtype(col):
        #no cut applied when col's unique value pop too high
        if col.value_counts(dropna=False).div(col.size).max()>0.95:
            breaks=[]
            vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values)
        elif np.unique(col[~np.isnan(col)]).size==1:
            breaks=[]
            vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values)
        #tree cut
        else:
            # BUGFIX: forward the user-supplied max_iters/tol instead of
            # hard-coded 100/0.0001, which silently ignored the settings.
            breaks=self._get_bestsplit(col.values,y.values,max_bin=max_bin,
                                       criteria=criteria,
                                       max_iters=max_iters,
                                       tol=tol,
                                       ws=ws,
                                       distr_limit=distr_limit,
                                       is_str_dtype=False,
                                       coerce_monotonic=coerce_monotonic,
                                       bin_num_limit=bin_num_limit)
            vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values)
    #string columns
    elif is_string_dtype(col):
        if np.unique(col).size==1:
            breaks=[]
        else:
            #sort levels by bad_rate(no-wieght)
            codes=y.groupby(col).mean().sort_values().index.tolist()
            #ordinal encode data start with 0
            map_code=dict(zip(codes,range(len(codes))))
            #tree cut
            # BUGFIX: forward max_iters/tol here as well.
            breaks_raw=self._get_bestsplit(col.map(map_code).values,
                                           y.values,
                                           criteria=criteria,
                                           max_iters=max_iters,
                                           tol=tol,
                                           ws=ws,
                                           distr_limit=distr_limit,
                                           is_str_dtype=True,
                                           bin_num_limit=bin_num_limit)
            #restore string breaks
            breaks=['%,%'.join(i) for i in np.split(codes,np.int32(breaks_raw)) if i.tolist()]
        # Report outside the if/else so the single-level branch also gets
        # a vtab instead of hitting an unbound local at the return.
        vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values)
    else:
        raise ValueError("col's dtype in ('number','object')")

    return col.name,breaks,vtab
def _get_bestsplit(self,col,y,max_bin=50,ws=None,criteria='iv',tol=1e-4,
max_iters=100,distr_limit=0.05,bin_num_limit=8,
is_str_dtype=False,coerce_monotonic=False):
#get sample_weight
if is_array_like(ws):
if ws.size!=y.size:
raise ValueError('length of weight not equal to y')
y = y * ws
count=ws
else:
count=np.ones(col.size)
nan_sum=pd.isnull(col).any()
#string dtype variable
if is_str_dtype:
cuts_remain=np.unique(col)
#number dtype variable
else:
| |
<filename>model.py
from abc import ABC, abstractmethod
from datetime import date, datetime
from decimal import Decimal, ROUND_HALF_EVEN
from enum import Enum, Flag, auto, unique
from itertools import permutations
from typing import Any, Dict, Iterable, NamedTuple, Optional, TypeVar, Union
import re
@unique
class Currency(Enum):
    """ISO currency codes supported for cash quantities."""
    USD = "USD"
    GBP = "GBP"
    AUD = "AUD"
    EUR = "EUR"
    JPY = "JPY"
    CAD = "CAD"
    CHF = "CHF"
    NZD = "NZD"

    def format(self, quantity: Decimal) -> str:
        """Render quantity with this currency's symbol.

        Negative amounts use the accounting convention of parentheses.
        """
        if quantity < 0:
            return '({})'.format(self.format(abs(quantity)))
        # Symbol-prefixed templates; JPY has no minor unit, hence no decimals.
        templates = {
            Currency.USD: '${:,.2f}',
            Currency.GBP: '£{:,.2f}',
            Currency.AUD: 'AU${:,.2f}',
            Currency.EUR: '€{:,.2f}',
            Currency.JPY: '¥{:,.0f}',
            Currency.CAD: 'C${:,.2f}',
            Currency.NZD: 'NZ${:,.2f}',
        }
        template = templates.get(self)
        if template is None:
            # Currencies without a dedicated symbol (e.g. CHF) fall back to
            # '<CODE> <amount>'.
            return '{} {:,}'.format(self.value, quantity)
        return template.format(quantity)
# Numeric scalar types Cash arithmetic accepts: Decimal or int (floats are
# deliberately excluded to keep Decimal quantities exact).
T = TypeVar('T', Decimal, int)
class Cash:
quantization = Decimal('0.0001')
@classmethod
def quantize(cls, d: Decimal) -> Decimal:
return d.quantize(cls.quantization, rounding=ROUND_HALF_EVEN)
def __init__(self, currency: Currency, quantity: Decimal):
if not quantity.is_finite():
raise ValueError(
'Cash quantity {} is not a finite number'.format(quantity))
self._currency = currency
self._quantity = self.quantize(quantity)
super().__init__()
@property
def currency(self) -> Currency:
return self._currency
@property
def quantity(self) -> Decimal:
return self._quantity
def __repr__(self) -> str:
return 'Cash(currency={}, quantity={})'.format(repr(self.currency),
repr(self.quantity))
def __str__(self) -> str:
return self.currency.format(self.quantity)
def __add__(self, other: Any) -> 'Cash':
if isinstance(other, Cash):
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for arithmetic'.format(
self, other))
return Cash(currency=self.currency,
quantity=self.quantity + other.quantity)
else:
return Cash(currency=self.currency, quantity=self.quantity + other)
def __sub__(self, other: Any) -> 'Cash':
if isinstance(other, Cash):
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for arithmetic'.format(
self, other))
return Cash(currency=self.currency,
quantity=self.quantity - other.quantity)
else:
return Cash(currency=self.currency, quantity=self.quantity - other)
def __mul__(self, other: T) -> 'Cash':
return Cash(currency=self.currency, quantity=self.quantity * other)
def __truediv__(self, other: T) -> 'Cash':
return Cash(currency=self.currency, quantity=self.quantity / other)
def __neg__(self) -> 'Cash':
return Cash(currency=self.currency, quantity=-self.quantity)
def __abs__(self) -> 'Cash':
return Cash(currency=self.currency, quantity=abs(self.quantity))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Cash):
# Make mypy happy
b: bool = self.quantity == other
return b
return self.currency == other.currency and self.quantity == other.quantity
def __lt__(self, other: 'Cash') -> bool:
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for comparison'.format(
self, other))
return self.quantity < other.quantity
def __le__(self, other: 'Cash') -> bool:
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for comparison'.format(
self, other))
return self.quantity <= other.quantity
def __gt__(self, other: 'Cash') -> bool:
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for comparison'.format(
self, other))
return self.quantity > other.quantity
def __ge__(self, other: 'Cash') -> bool:
if self.currency != other.currency:
raise ValueError(
'Currency of {} must match {} for comparison'.format(
self, other))
return self.quantity >= other.quantity
    def __hash__(self) -> int:
        # Hash on (currency, quantity) so equal Cash values collide.
        # NOTE(review): __eq__ lets Cash compare equal to bare numbers, which
        # hash differently — confirm that mixed-type dict/set use is not relied on.
        return hash((self.currency, self.quantity))
class Instrument(ABC):
    """Abstract base for tradeable instruments.

    Equality is strict on (type, symbol, currency); ordering compares
    symbols only.
    """

    multiplierQuantization = Decimal('0.1')

    @classmethod
    def quantizeMultiplier(cls, multiplier: Decimal) -> Decimal:
        """Round a contract multiplier to the canonical precision."""
        return multiplier.quantize(cls.multiplierQuantization,
                                   rounding=ROUND_HALF_EVEN)

    @abstractmethod
    def __init__(self, symbol: str, currency: Currency):
        if not symbol:
            raise ValueError('Expected non-empty symbol for instrument')
        if not currency:
            raise ValueError('Expected currency for instrument')
        self._symbol = symbol
        self._currency = currency
        super().__init__()

    @property
    def symbol(self) -> str:
        return self._symbol

    @property
    def currency(self) -> Currency:
        return self._currency

    @property
    def multiplier(self) -> Decimal:
        # Default contract multiplier; derivative subclasses override this.
        return Decimal(1)

    def __eq__(self, other: Any) -> bool:
        # Strict typechecking, because we want different types of Instrument
        # to be inequal.
        if type(self) is not type(other):
            return False
        return bool(self.symbol == other.symbol
                    and self.currency == other.currency)

    def __hash__(self) -> int:
        return hash((self.currency, self.symbol))

    def __lt__(self, other: 'Instrument') -> bool:
        return self.symbol < other.symbol

    def __le__(self, other: 'Instrument') -> bool:
        return self.symbol <= other.symbol

    def __gt__(self, other: 'Instrument') -> bool:
        return self.symbol > other.symbol

    def __ge__(self, other: 'Instrument') -> bool:
        return self.symbol >= other.symbol

    def __format__(self, spec: str) -> str:
        return format(self.symbol, spec)

    def __repr__(self) -> str:
        return f'{type(self)!r}(symbol={self.symbol!r}, currency={self.currency!r})'

    def __str__(self) -> str:
        return self._symbol
# Also used for ETFs.
class Stock(Instrument):
    """An exchange-traded equity (or ETF) identified by its ticker symbol."""
    def __init__(self, symbol: str, currency: Currency):
        super().__init__(symbol, currency)
class Bond(Instrument):
    """A bond, identified by its CUSIP."""

    regexCUSIP = r'^[0-9]{3}[0-9A-Z]{5}[0-9]$'

    @classmethod
    def validBondSymbol(cls, symbol: str) -> bool:
        """Return True when *symbol* matches the CUSIP pattern."""
        return re.match(cls.regexCUSIP, symbol) is not None

    def __init__(self,
                 symbol: str,
                 currency: Currency,
                 validateSymbol: bool = True):
        if validateSymbol:
            if not self.validBondSymbol(symbol):
                raise ValueError(
                    'Expected symbol to be a bond CUSIP: {}'.format(symbol))
        super().__init__(symbol, currency)
@unique
class OptionType(Enum):
    """Put/call flag, encoded as in OCC option symbology."""
    PUT = 'P'
    CALL = 'C'
class Option(Instrument):
    """An equity option; the symbol defaults to OCC symbology."""

    # Matches the multiplicative factor in OCC options symbology.
    strikeQuantization = Decimal('0.001')

    @classmethod
    def quantizeStrike(cls, strike: Decimal) -> Decimal:
        """Round a strike price to OCC precision."""
        return strike.quantize(cls.strikeQuantization,
                               rounding=ROUND_HALF_EVEN)

    def __init__(self,
                 underlying: str,
                 currency: Currency,
                 optionType: OptionType,
                 expiration: date,
                 strike: Decimal,
                 multiplier: Decimal = Decimal(100),
                 symbol: Optional[str] = None):
        if not underlying:
            raise ValueError('Expected non-empty underlying symbol for Option')
        if not strike.is_finite() or strike <= 0:
            raise ValueError(f'Expected positive strike price: {strike}')
        if not multiplier.is_finite() or multiplier <= 0:
            raise ValueError(f'Expected positive multiplier: {multiplier}')

        self._underlying = underlying
        self._optionType = optionType
        self._expiration = expiration
        self._strike = self.quantizeStrike(strike)
        self._multiplier = self.quantizeMultiplier(multiplier)

        if symbol is None:
            # https://en.wikipedia.org/wiki/Option_symbol#The_OCC_Option_Symbol
            symbol = '{:6}{}{}{:08.0f}'.format(underlying,
                                               expiration.strftime('%y%m%d'),
                                               optionType.value, strike * 1000)
        super().__init__(symbol, currency)

    @property
    def underlying(self) -> str:
        return self._underlying

    @property
    def optionType(self) -> OptionType:
        return self._optionType

    @property
    def expiration(self) -> date:
        return self._expiration

    @property
    def strike(self) -> Decimal:
        return self._strike

    @property
    def multiplier(self) -> Decimal:
        return self._multiplier

    def __repr__(self) -> str:
        return (f'{type(self)!r}(underlying={self.underlying!r}, '
                f'optionType={self.optionType!r}, '
                f'expiration={self.expiration!r}, strike={self.strike!r}, '
                f'currency={self.currency!r}, multiplier={self.multiplier!r})')
class FutureOption(Option):
    """An option on a futures contract.

    Identical to Option except the exchange symbol must be supplied
    explicitly rather than derived from OCC symbology.
    """
    def __init__(self, symbol: str, underlying: str, currency: Currency,
                 optionType: OptionType, expiration: date, strike: Decimal,
                 multiplier: Decimal):
        super().__init__(underlying=underlying,
                         currency=currency,
                         optionType=optionType,
                         expiration=expiration,
                         strike=strike,
                         multiplier=multiplier,
                         symbol=symbol)
class Future(Instrument):
    """A futures contract with a fixed multiplier and expiration date."""

    def __init__(self, symbol: str, currency: Currency, multiplier: Decimal,
                 expiration: date):
        if not multiplier.is_finite() or multiplier <= 0:
            raise ValueError(f'Expected positive multiplier: {multiplier}')
        self._multiplier = self.quantizeMultiplier(multiplier)
        self._expiration = expiration
        super().__init__(symbol, currency)

    @property
    def multiplier(self) -> Decimal:
        return self._multiplier

    @property
    def expiration(self) -> date:
        return self._expiration

    def __repr__(self) -> str:
        return (f'{type(self)!r}(symbol={self.symbol!r}, '
                f'currency={self.currency!r}, multiplier={self.multiplier!r}, '
                f'expiration={self.expiration!r})')
class Forex(Instrument):
    """A currency pair; the instrument's own currency is the quote side."""

    def __init__(self, baseCurrency: Currency, quoteCurrency: Currency):
        if baseCurrency == quoteCurrency:
            raise ValueError(
                f'Forex pair must be composed of different currencies, '
                f'got {baseCurrency!r} and {quoteCurrency!r}')
        self._baseCurrency = baseCurrency
        pairSymbol = f'{baseCurrency.name}{quoteCurrency.name}'
        super().__init__(pairSymbol, quoteCurrency)

    @property
    def quoteCurrency(self) -> Currency:
        # Alias: the instrument's currency is the quote currency.
        return self.currency

    @property
    def baseCurrency(self) -> Currency:
        return self._baseCurrency

    def __repr__(self) -> str:
        return (f'{type(self)!r}(baseCurrency={self.baseCurrency!r}, '
                f'quoteCurrency={self.quoteCurrency!r})')
Item = TypeVar('Item')


def allEqual(i: Iterable[Item]) -> bool:
    """Return True if every element of *i* compares equal.

    Vacuously True for empty and single-element iterables. Runs in O(n)
    by comparing each element to the first, instead of the previous
    O(n^2) pairwise check (equivalent for symmetric, transitive `==`).
    """
    it = iter(i)
    try:
        first = next(it)
    except StopIteration:
        return True
    return all(first == x for x in it)
class Quote:
    """A market quote: optional bid/ask/last/close prices in one currency."""

    def __init__(self,
                 bid: Optional[Cash] = None,
                 ask: Optional[Cash] = None,
                 last: Optional[Cash] = None,
                 close: Optional[Cash] = None):
        if bid and ask and ask < bid:
            raise ValueError(f'Expected ask {ask} to be at least bid {bid}')
        prices = [bid, ask, last, close]
        if not allEqual(p.currency for p in prices if p is not None):
            raise ValueError(f'Currencies in a quote should match: {prices}')
        self._bid = bid
        self._ask = ask
        self._last = last
        self._close = close
        super().__init__()

    @property
    def bid(self) -> Optional[Cash]:
        return self._bid

    @property
    def ask(self) -> Optional[Cash]:
        return self._ask

    @property
    def last(self) -> Optional[Cash]:
        return self._last

    @property
    def close(self) -> Optional[Cash]:
        return self._close

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Quote):
            return False
        return (self.bid == other.bid and self.ask == other.ask
                and self.last == other.last and self.close == other.close)

    def __hash__(self) -> int:
        return hash((self.bid, self.ask, self.last, self.close))

    def __repr__(self) -> str:
        return (f'Quote(bid={self.bid!r}, ask={self.ask!r}, '
                f'last={self.last!r}, close={self.close!r})')
class LiveDataProvider(ABC):
    """Interface for services that fetch live market quotes."""
    @abstractmethod
    def fetchQuote(self, instrument: Instrument) -> Quote:
        # Implementations return the current quote for `instrument`.
        pass
class Position:
quantityQuantization = Decimal('0.0001')
    @classmethod
    def quantizeQuantity(cls, quantity: Decimal) -> Decimal:
        # Round a position quantity to the canonical 4-decimal precision,
        # banker's rounding to avoid systematic bias.
        return quantity.quantize(cls.quantityQuantization,
                                 rounding=ROUND_HALF_EVEN)
    def __init__(self, instrument: Instrument, quantity: Decimal,
                 costBasis: Cash):
        """Create a position of `quantity` units of `instrument`.

        Raises ValueError if the cost basis currency disagrees with the
        instrument's, the quantity is not finite, or a zero-quantity
        position carries a nonzero cost basis.
        """
        if instrument.currency != costBasis.currency:
            raise ValueError(
                'Cost basis {} should be in same currency as instrument {}'.
                format(costBasis, instrument))
        if not quantity.is_finite():
            raise ValueError(
                'Position quantity {} is not a finite number'.format(quantity))
        # Normalize to the canonical precision before the zero check.
        quantity = self.quantizeQuantity(quantity)
        if quantity == 0 and costBasis != 0:
            raise ValueError(
                'Cost basis {} should be zero if quantity is zero'.format(
                    repr(costBasis)))
        self._instrument = instrument
        self._quantity = quantity
        self._costBasis = costBasis
        super().__init__()
def combine(self, other: 'Position') -> 'Position':
if self.instrument != other.instrument:
raise ValueError(
'Cannot combine positions in two different instruments: {} and {}'
.format(self.instrument, other.instrument))
return Position(instrument=self.instrument,
| |
# Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"HyperParameters logic."
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import numpy as np
import random
from tensorflow import keras
from ..protos import kerastuner_pb2
def _check_sampling_arg(sampling,
step,
min_value,
max_value,
hp_type='int'):
if sampling is None:
return None
if hp_type == 'int' and step != 1:
raise ValueError(
'`sampling` can only be set on an `Int` when `step=1`.')
if hp_type != 'int' and step is not None:
raise ValueError(
'`sampling` and `step` cannot both be set, found '
'`sampling`: ' + str(sampling) + ', `step`: ' + str(step))
_sampling_values = {'linear', 'log', 'reverse_log'}
sampling = sampling.lower()
if sampling not in _sampling_values:
raise ValueError(
'`sampling` must be one of ' + str(_sampling_values))
if sampling in {'log', 'reverse_log'} and min_value <= 0:
raise ValueError(
'`sampling="' + str(sampling) + '" is not supported for '
'negative values, found `min_value`: ' + str(min_value))
return sampling
def _check_int(val, arg):
int_val = int(val)
if int_val != val:
raise ValueError(
arg + ' must be an int, found: ' + str(val))
return int_val
class HyperParameter(object):
    """HyperParameter base class.

    # Arguments:
        name: Str. Name of parameter. Must be unique.
        default: Default value to return for the
            parameter.
    """

    def __init__(self, name, default=None):
        self.name = name
        self._default = default

    def get_config(self):
        """Return a serializable description of this parameter."""
        return {'name': self.name, 'default': self.default}

    @property
    def default(self):
        return self._default

    def random_sample(self, seed=None):
        """Draw a random value; must be implemented by subclasses."""
        raise NotImplementedError

    @classmethod
    def from_config(cls, config):
        """Inverse of `get_config`."""
        return cls(**config)
class Choice(HyperParameter):
    """Choice of one value among a predefined set of possible values.

    # Arguments:
        name: Str. Name of parameter. Must be unique.
        values: List of possible values. Values must be int, float,
            str, or bool. All values must be of the same type.
        ordered: Whether the values passed should be considered to
            have an ordering. This defaults to `True` for float/int
            values. Must be `False` for any other values.
        default: Default value to return for the parameter.
            If unspecified, the default value will be:
            - None if None is one of the choices in `values`
            - The first entry in `values` otherwise.
    """

    def __init__(self, name, values, ordered=None, default=None):
        super(Choice, self).__init__(name=name, default=default)
        if not values:
            raise ValueError('`values` must be provided.')
        self.values = values

        # Type checking: all values must share a single supported type.
        types = {type(v) for v in values}
        unsupported_types = types - {int, float, str, bool}
        if unsupported_types:
            # Fixed: a space was missing between the values and the word
            # "with" in this error message.
            raise TypeError(
                'A `Choice` can contain only `int`, `float`, `str`, or '
                '`bool`, found values: ' + str(values) + ' with '
                'types: ' + str(unsupported_types))
        if len(types) > 1:
            raise TypeError(
                'A `Choice` can contain only one type of value, found '
                'values: ' + str(values) + ' with types ' + str(types))
        self._type = types.pop()

        # Get or infer ordered.
        self.ordered = ordered
        orderable_types = {int, float}
        if self.ordered and self._type not in orderable_types:
            raise ValueError('`ordered` must be `False` for non-numeric '
                             'types.')
        if self.ordered is None:
            self.ordered = self._type in orderable_types

        if default is not None and default not in values:
            raise ValueError(
                'The default value should be one of the choices. '
                'You passed: values=%s, default=%s' % (values, default))

    def __repr__(self):
        return 'Choice(name: "{}", values: {}, ordered: {}, default: {})'.format(
            self.name, self.values, self.ordered, self.default)

    @property
    def default(self):
        # Resolve the documented fallback: None if it is a valid choice,
        # otherwise the first value.
        if self._default is None:
            if None in self.values:
                return None
            return self.values[0]
        return self._default

    def random_sample(self, seed=None):
        """Uniformly sample one of the values (deterministic per seed)."""
        random_state = random.Random(seed)
        return random_state.choice(self.values)

    def get_config(self):
        config = super(Choice, self).get_config()
        config['values'] = self.values
        config['ordered'] = self.ordered
        return config

    @classmethod
    def from_proto(cls, proto):
        values = [getattr(val, val.WhichOneof('kind')) for val in proto.values]
        # Fixed: `WhichOneof` returns None when the default is unset, and
        # `getattr(obj, None, None)` raises TypeError rather than falling
        # back to the default.
        kind = proto.default.WhichOneof('kind')
        default = getattr(proto.default, kind) if kind is not None else None
        return cls(
            name=proto.name,
            values=values,
            ordered=proto.ordered,
            default=default)

    def to_proto(self):
        # NOTE(review): bool-typed choices fall through to the float branch —
        # confirm the proto round-trip preserves them as intended.
        if self._type == str:
            values = [kerastuner_pb2.Value(string_value=v) for v in self.values]
            default = kerastuner_pb2.Value(string_value=self.default)
        elif self._type == int:
            values = [kerastuner_pb2.Value(int_value=v) for v in self.values]
            default = kerastuner_pb2.Value(int_value=self.default)
        else:
            values = [kerastuner_pb2.Value(float_value=v) for v in self.values]
            default = kerastuner_pb2.Value(float_value=self.default)
        return kerastuner_pb2.Choice(
            name=self.name,
            ordered=self.ordered,
            values=values,
            default=default)
class Int(HyperParameter):
    """Integer range.

    Note that unlike Python's `range` function, `max_value` is *included* in
    the possible values this parameter can take on.

    # Arguments:
        name: Str. Name of parameter. Must be unique.
        min_value: Int. Lower limit of range (included).
        max_value: Int. Upper limit of range (included).
        step: Int. Step of range.
        sampling: Optional. One of "linear", "log",
            "reverse_log". Acts as a hint for an initial prior
            probability distribution for how this value should
            be sampled, e.g. "log" will assign equal
            probabilities to each order of magnitude range.
        default: Default value to return for the parameter.
            If unspecified, the default value will be
            `min_value`.
    """

    def __init__(self,
                 name,
                 min_value,
                 max_value,
                 step=1,
                 sampling=None,
                 default=None):
        super(Int, self).__init__(name=name, default=default)
        self.max_value = _check_int(max_value, arg='max_value')
        self.min_value = _check_int(min_value, arg='min_value')
        self.step = _check_int(step, arg='step')
        self.sampling = _check_sampling_arg(
            sampling, step, min_value, max_value, hp_type='int')

    def __repr__(self):
        return ('Int(name: "{}", min_value: {}, max_value: {}, step: {}, '
                'sampling: {}, default: {})').format(
                    self.name,
                    self.min_value,
                    self.max_value,
                    self.step,
                    self.sampling,
                    self.default)

    def random_sample(self, seed=None):
        """Draw a value from the range (deterministic per seed)."""
        random_state = random.Random(seed)
        prob = float(random_state.random())
        return cumulative_prob_to_value(prob, self)

    @property
    def default(self):
        if self._default is not None:
            return self._default
        return self.min_value

    def get_config(self):
        config = super(Int, self).get_config()
        config['min_value'] = self.min_value
        config['max_value'] = self.max_value
        config['step'] = self.step
        config['sampling'] = self.sampling
        # Store the raw default so an unset default survives a round-trip.
        config['default'] = self._default
        return config

    @classmethod
    def from_proto(cls, proto):
        return cls(name=proto.name,
                   min_value=proto.min_value,
                   max_value=proto.max_value,
                   # Fixed: proto.step == 0 means "unset"; fall back to the
                   # class default of 1. Passing None here crashed in
                   # `_check_int` (int(None) raises TypeError).
                   step=proto.step if proto.step else 1,
                   sampling=_sampling_from_proto(proto.sampling),
                   default=proto.default)

    def to_proto(self):
        return kerastuner_pb2.Int(
            name=self.name,
            min_value=self.min_value,
            max_value=self.max_value,
            step=self.step if self.step is not None else 0,
            sampling=_sampling_to_proto(self.sampling),
            default=self.default)
class Float(HyperParameter):
    """Floating point range, can be evenly divided.

    # Arguments:
        name: Str. Name of parameter. Must be unique.
        min_value: Float. Lower bound of the range.
        max_value: Float. Upper bound of the range.
        step: Optional. Float, e.g. 0.1.
            smallest meaningful distance between two values.
            Whether step should be specified is Oracle dependent,
            since some Oracles can infer an optimal step automatically.
        sampling: Optional. One of "linear", "log",
            "reverse_log". Acts as a hint for an initial prior
            probability distribution for how this value should
            be sampled, e.g. "log" will assign equal
            probabilities to each order of magnitude range.
        default: Default value to return for the parameter.
            If unspecified, the default value will be
            `min_value`.
    """

    def __init__(self,
                 name,
                 min_value,
                 max_value,
                 step=None,
                 sampling=None,
                 default=None):
        super(Float, self).__init__(name=name, default=default)
        self.max_value = float(max_value)
        self.min_value = float(min_value)
        if step is not None:
            self.step = float(step)
        else:
            self.step = None
        self.sampling = _check_sampling_arg(
            sampling, step, min_value, max_value, hp_type='float')

    def __repr__(self):
        return ('Float(name: "{}", min_value: {}, max_value: {}, step: {}, '
                'sampling: {}, default: {})').format(
                    self.name,
                    self.min_value,
                    self.max_value,
                    self.step,
                    self.sampling,
                    self.default)

    @property
    def default(self):
        if self._default is not None:
            return self._default
        return self.min_value

    def random_sample(self, seed=None):
        """Draw a value from the range (deterministic per seed)."""
        random_state = random.Random(seed)
        prob = float(random_state.random())
        return cumulative_prob_to_value(prob, self)

    def get_config(self):
        config = super(Float, self).get_config()
        config['min_value'] = self.min_value
        config['max_value'] = self.max_value
        config['step'] = self.step
        config['sampling'] = self.sampling
        # Fixed for consistency with Int.get_config: store the raw default
        # (possibly None) instead of the resolved property, so that an
        # unset default is not silently pinned to min_value on round-trip.
        config['default'] = self._default
        return config

    @classmethod
    def from_proto(cls, proto):
        return cls(name=proto.name,
                   min_value=proto.min_value,
                   max_value=proto.max_value,
                   step=proto.step if proto.step else None,
                   sampling=_sampling_from_proto(proto.sampling),
                   default=proto.default)

    def to_proto(self):
        return kerastuner_pb2.Float(
            name=self.name,
            min_value=self.min_value,
            max_value=self.max_value,
            step=self.step if self.step is not None else 0.0,
            sampling=_sampling_to_proto(self.sampling),
            default=self.default)
class Boolean(HyperParameter):
    """Choice between True and False.

    # Arguments
        name: Str. Name of parameter. Must be unique.
        default: Default value to return for the parameter.
            If unspecified, the default value will be False.
    """

    def __init__(self, name, default=False):
        super(Boolean, self).__init__(name=name, default=default)
        if default not in (True, False):
            raise ValueError(
                '`default` must be a Python boolean. '
                'You passed: default=%s' % (default,))

    def __repr__(self):
        return f'Boolean(name: "{self.name}", default: {self.default})'

    def random_sample(self, seed=None):
        """Flip a coin (deterministic per seed)."""
        return random.Random(seed).choice((True, False))

    @classmethod
    def from_proto(cls, proto):
        return cls(name=proto.name, default=proto.default)

    def to_proto(self):
        return kerastuner_pb2.Boolean(
            name=self.name,
            default=self.default)
class Fixed(HyperParameter):
"""Fixed, untunable value.
# Arguments
name: Str. Name of parameter. Must be unique.
value: Value | |
<gh_stars>0
"""
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import os, glob
import numpy as np
import SimpleITK as sitk
import pydicom
from scipy.ndimage.morphology import binary_dilation
# ========================================================================
# sequences to be read, and the sequence name
# Maps raw DICOM SeriesDescription strings to canonical sequence names.
SEQ_DICT_PROSTATEX = \
{
't2_tse_tra': 'T2',
't2_tse_tra_Grappa3': 'T2',
't2_tse_tra_320_p2': 'T2',
'ep2d-advdiff-3Scan-high bvalue 100': 'b',
'ep2d-advdiff-3Scan-high bvalue 500': 'b',
'ep2d-advdiff-3Scan-high bvalue 1400': 'b',
'ep2d_diff_tra2x2_Noise0_FS_DYNDISTCALC_BVAL': 'b',
'ep2d_diff_tra_DYNDIST': 'b_mix',
'ep2d_diff_tra_DYNDIST_MIX': 'b_mix',
'diffusie-3Scan-4bval_fs': 'b_mix',
'ep2d_DIFF_tra_b50_500_800_1400_alle_spoelen': 'b_mix',
'diff tra b 50 500 800 WIP511b alle spoelen': 'b_mix',
'ep2d_diff_tra_DYNDIST_MIX_ADC': 'ADC',
'diffusie-3Scan-4bval_fs_ADC': 'ADC',
'ep2d-advdiff-MDDW-12dir_spair_511b_ADC': 'ADC',
'ep2d-advdiff-3Scan-4bval_spair_511b_ADC': 'ADC',
'ep2d_DIFF_tra_b50_500_800_1400_alle_spoelen_ADC': 'ADC',
'diff tra b 50 500 800 WIP511b alle spoelen_ADC': 'ADC',
'ADC_S3_1': 'ADC',
'ep2d_diff_tra_DYNDIST_ADC': 'ADC',
}
# Canonical sequence groups read from disk, and the per-channel sub-sequences
# stacked into the final 4D volume.
SEQ_TO_USE = ['T2', 'b', 'b_mix', 'ADC', 'ktrans']
SUB_SEQ_TO_USE = ['T2', 'b400', 'b800', 'ADC', 'ktrans']
# sequences with special fix
B_SER_FIX = ['diffusie-3Scan-4bval_fs',
             'ep2d_DIFF_tra_b50_500_800_1400_alle_spoelen',
             'diff tra b 50 500 800 WIP511b alle spoelen']
# patients with special fix
EXP_PATIENTS = ['ProstateX-0191', 'ProstateX-0148', 'ProstateX-0180']
# use T2[-1],b400 [1],b800 [2]
# Per-patient index of which series to keep within each sequence group.
SER_INX_TO_USE = {}
SER_INX_TO_USE['all'] = {'T2': -1, 'b': [1, 2], 'ADC': 0, 'ktrans': 0}
SER_INX_TO_USE['ProstateX-0148'] = {'T2': 1, 'b': [1, 2], 'ADC': 0, 'ktrans': 0}
SER_INX_TO_USE['ProstateX-0191'] = {'T2': -1, 'b': [0, 0], 'ADC': 0, 'ktrans': 0}
SER_INX_TO_USE['ProstateX-0180'] = {'T2': -1, 'b': [1, 2], 'ADC': 0, 'ktrans': 0}
# ==========================================================================
def create_resample(vol_ref, interpolation, size, spacing):
    """Build a SimpleITK ResampleImageFilter targeting `size`/`spacing`.

    vol_ref: reference image whose metadata the output grid copies.
    interpolation: one of 'linear', 'nn', 'bspline'.

    Raises ValueError for an unknown interpolation name (previously this
    left `interpolator` unbound and crashed later with UnboundLocalError).
    """
    if interpolation == 'linear':
        interpolator = sitk.sitkLinear
    elif interpolation == 'nn':
        interpolator = sitk.sitkNearestNeighbor
    elif interpolation == 'bspline':
        interpolator = sitk.sitkBSpline
    else:
        raise ValueError('Unknown interpolation: {}'.format(interpolation))
    resample = sitk.ResampleImageFilter()
    resample.SetReferenceImage(vol_ref)
    resample.SetOutputSpacing(spacing)
    resample.SetInterpolator(interpolator)
    resample.SetSize(size)
    return resample
class PROSTATEX3DVolume:
    """Loader/preprocessor for one ProstateX patient's multi-sequence MRI.

    Reads the DICOM series (T2 / b-value / ADC) plus the ktrans .mhd volume,
    resamples all sequences onto a shared grid, rescales intensities, and
    crops a patch around a lesion position.
    """
    def __init__(self,
                 patient_id: str=None,
                 imgs_path: str=None,
                 ktrans_data_path: str=None,
                 array: np.ndarray = None,
                 hdf5_filename: str = None,
                 verbose: bool = True,
                 ):
        self._patient_id = patient_id
        self._imgs_path = imgs_path
        self._ktrans_data_path = ktrans_data_path
        self._array = array
        self._hdf5_filename = hdf5_filename
        self._verbose = verbose
        if array is None and imgs_path is None and hdf5_filename is None:
            # Raise instead of `assert False`: asserts are stripped under -O.
            raise ValueError(
                'ERROR while creating CAP3DVolume: no input was provided '
                '(link to volume OR path to images must be given)')

    # ========================================================================
    def extract_stk_vol(self, img_path, img_list=None, reverse_order=False, is_path=True):
        """Read a volume (or one/more DICOM series) as SimpleITK images.

        img_path: file path (is_path=True) or DICOM directory (is_path=False).
        img_list: optional list of per-series DICOM file-name lists; when
            empty/None the series is discovered via GDCM.
        reverse_order: reverse slice order within each series.
        Returns a list of sitk.Image volumes.
        """
        stk_vols = []

        if is_path:
            vol = sitk.ReadImage(img_path)
            stk_vols.append(vol)
            return stk_vols

        series_reader = sitk.ImageSeriesReader()

        # Fixed: avoid the mutable default argument `img_list=[]`;
        # None and [] both mean "discover the series automatically".
        if not img_list:
            img_list = [series_reader.GetGDCMSeriesFileNames(img_path)]

        for imgs_names in img_list:
            if img_path not in img_list[0][0]:
                # Names are bare file names; make them absolute.
                imgs_names = [os.path.join(img_path, name) for name in imgs_names]
            dicom_names = imgs_names[::-1] if reverse_order else imgs_names
            series_reader.SetFileNames(dicom_names)
            imgs = series_reader.Execute()
            stk_vols.append(imgs)

        return stk_vols

    # ========================================================================
    def sort_dicom_by_dicom_field(self, dcm_files, dicom_field=(0x19, 0x100c)):
        """Group DICOM files by an integer tag (default: b-value) and return
        the file-name groups sorted in ascending tag order."""
        dcm_values = {}
        for dcm in dcm_files:
            dcm_ds = pydicom.dcmread(dcm)
            val = int(dcm_ds[dicom_field].value)
            dcm_values.setdefault(val, []).append(os.path.split(dcm)[-1])
        # Fixed: a plain dict only preserves insertion order; sort the groups
        # by tag value as the method name promises.
        sorted_names_list = [names for _, names in sorted(dcm_values.items())]
        return sorted_names_list

    # ========================================================================
    def extract_prostatex_stk_vol(self):
        """Read all usable sequences for the patient.

        Returns (vols_dict, sequences_dict): sequence name -> list of
        sitk volumes / list of originating series descriptions.
        """
        ktrans_path = os.path.join(self._ktrans_data_path, self._patient_id)
        if self._verbose: print('Patient ID: %s' % (self._patient_id))

        # ------------------------
        # images dict and sequences description dict
        vols_dict = {k: [] for k in SEQ_TO_USE}
        sequences_dict = {k: [] for k in SEQ_TO_USE}

        for img_path in os.listdir(self._imgs_path):
            # Best-effort per series: a single unreadable series should not
            # abort the whole patient.
            try:
                full_path = os.path.join(self._imgs_path, img_path)
                dcm_files = glob.glob(os.path.join(full_path, '*.dcm'))
                series_desc = pydicom.dcmread(dcm_files[0]).SeriesDescription

                #------------------------
                # print series description
                series_desc_general = SEQ_DICT_PROSTATEX[series_desc] \
                    if series_desc in SEQ_DICT_PROSTATEX else 'UNKNOWN'
                if self._verbose: print('\t- Series description:',' %s (%s)' % (series_desc, series_desc_general))

                #------------------------
                # ignore UNKNOWN series
                if series_desc not in SEQ_DICT_PROSTATEX or \
                        SEQ_DICT_PROSTATEX[series_desc] not in SEQ_TO_USE:
                    continue

                #------------------------
                # b-series - sorting images by b-value
                if SEQ_DICT_PROSTATEX[series_desc] == 'b_mix':
                    sorted_dicom_names = self.sort_dicom_by_dicom_field(dcm_files, dicom_field=(0x19, 0x100c))
                    stk_vols = self.extract_stk_vol(full_path, img_list=sorted_dicom_names, reverse_order=True, is_path=False)

                #------------------------
                # general case
                else:
                    stk_vols = self.extract_stk_vol(full_path, img_list=[], reverse_order=False, is_path=False)

                #------------------------
                # volume dictionary: mixed b-value series are folded into 'b'
                if SEQ_DICT_PROSTATEX[series_desc] == 'b_mix':
                    vols_dict['b'] += stk_vols
                    sequences_dict['b'] += [series_desc]
                else:
                    vols_dict[SEQ_DICT_PROSTATEX[series_desc]] += stk_vols
                    sequences_dict[SEQ_DICT_PROSTATEX[series_desc]] += [series_desc]

            except Exception as e:
                print(e)

        # Read ktrans image (stored as a single .mhd volume, not DICOM)
        try:
            mhd_path = glob.glob(os.path.join(ktrans_path, '*.mhd'))[0]
            print('\t- Reading: %s (%s) (%s)' % (os.path.split(mhd_path)[-1], 'Ktrans', 'ktrans'))
            stk_vols = self.extract_stk_vol(mhd_path, img_list=[], reverse_order=False, is_path=True)
            vols_dict['ktrans'] = stk_vols
            sequences_dict['ktrans'] = [ktrans_path]
        except Exception as e:
            print(e)

        # 'b_mix' was only a routing bucket; its volumes now live under 'b'.
        if 'b_mix' in vols_dict.keys():
            vols_dict.pop('b_mix')
            sequences_dict.pop('b_mix')

        return vols_dict, sequences_dict

    # ========================================================================
    def read_prostatex_sequences_patient(self):
        """Return the patient's volumes as a list ordered like SUB_SEQ_TO_USE."""

        def get_zeros_vol(vol):
            # All-zero placeholder volume with the same geometry as `vol`.
            if vol.GetNumberOfComponentsPerPixel() > 1:
                ref_zeros_vol = sitk.VectorIndexSelectionCast(vol, 0)
            else:
                ref_zeros_vol = vol
            zeros_vol = np.zeros_like(sitk.GetArrayFromImage(ref_zeros_vol))
            zeros_vol = sitk.GetImageFromArray(zeros_vol)
            zeros_vol.CopyInformation(ref_zeros_vol)
            return zeros_vol

        def stack_rel_vol_in_list(vols, series_inx_to_use, seq):
            # Pick one series per sequence (per-patient overrides apply) and
            # substitute a zero volume where a series is missing.
            vols_list = []
            for s, v0 in vols.items():
                vol_inx_to_use = series_inx_to_use['all'][s]
                if self._patient_id in EXP_PATIENTS:
                    vol_inx_to_use = series_inx_to_use[self._patient_id][s]

                if isinstance(vol_inx_to_use, list):
                    for inx in vol_inx_to_use:
                        v = v0[inx]
                        if len(v):
                            vols_list.append(v)
                        else:
                            vols_list.append(get_zeros_vol(vols_list[0]))
                            if self._verbose: print('\n - problem with reading %s volume!' % s)
                else:
                    v = v0[vol_inx_to_use]
                    if len(v):
                        vols_list.append(v)
                    else:
                        vols_list.append(get_zeros_vol(vols_list[0]))
                        if self._verbose: print('\n - problem with reading %s volume!' % s)

            # Some scanners store b-series with broken geometry; copy it
            # from the ADC volume for the known-problematic series.
            if ('b' in seq_info.keys()):
                if (seq_info['b'][0] in B_SER_FIX):
                    vols_list[seq.index('b800')].CopyInformation(vols_list[seq.index('ADC')])
                    vols_list[seq.index('b400')].CopyInformation(vols_list[seq.index('ADC')])

            if len(vols_list) != len(seq):
                raise ValueError('Expected %d image modalities, found %d' % (len(seq), len(vols_list)))
            return vols_list

        # ------------------------
        # extract stk vol list per sequence
        vols_dict, seq_info = self.extract_prostatex_stk_vol()

        # ------------------------
        # stack volumes by seq order,
        # keep only vol as defined in series_inx_to_use
        vols_list = stack_rel_vol_in_list(vols_dict, SER_INX_TO_USE, SUB_SEQ_TO_USE)
        return vols_list

    # ========================================================================
    def apply_rescaling(self, img, thres=(1.0, 99.0), method='noclip'):
        """Percentile-based intensity rescaling, per channel.

        method: 'clip' (percentile clip + scale), 'mean'/'median'
        (divide by the statistic), 'noclip' (percentile scale without
        clipping); anything else leaves the channel untouched.
        """
        eps = 0.000001

        def rescale_single_channel_image(img):
            # Deal with negative values first
            min_value = np.min(img)
            if min_value < 0:
                img -= min_value
            if method == 'clip':
                val_l, val_h = np.percentile(img, thres)
                img2 = img
                img2[img < val_l] = val_l
                img2[img > val_h] = val_h
                img2 = (img2.astype(np.float32) - val_l) / (val_h - val_l + eps)
            elif method == 'mean':
                img2 = img / max(np.mean(img), 1)
            elif method == 'median':
                img2 = img / max(np.median(img), 1)
            elif method == 'noclip':
                val_l, val_h = np.percentile(img, thres)
                img2 = img
                img2 = (img2.astype(np.float32) - val_l) / (val_h - val_l + eps)
            else:
                img2 = img
            return img2

        # fix outlier image values
        img[np.isnan(img)] = 0
        # img[img < 0] = 0

        # Process each channel independently
        if len(img.shape) == 4:
            for i in range(img.shape[-1]):
                img[..., i] = rescale_single_channel_image(img[..., i])
        else:
            img = rescale_single_channel_image(img)

        return img

    # ========================================================================
    def apply_resampling(self, img, mask, spacing=(0.5, 0.5, 3), size=(160, 160, 32),
                         transform=None, interpolation='bspline',
                         label_interpolator=sitk.sitkLabelGaussian,
                         ):
        """Resample image and mask onto a grid of `size`/`spacing`,
        optionally applying `transform`. The mask uses a label-preserving
        interpolator."""
        ref = img if img != [] else mask
        size = [int(s) for s in size]
        resample = create_resample(ref, interpolation, size=size, spacing=spacing)

        # Fixed: the original `if ~(transform is None):` was always truthy
        # (~True == -2, ~False == -1), so SetTransform(None) was invoked
        # whenever no transform was supplied.
        if transform is not None:
            resample.SetTransform(transform)
        img_r = resample.Execute(img)

        resample.SetInterpolator(label_interpolator)
        mask_r = resample.Execute(mask)

        return img_r, mask_r

    # ========================================================================
    def preprocess_sequences(self, vols, interpolation='bspline', reference_inx=0):
        """Resample all sequences onto the reference volume's grid, stack
        them as one 4D vector image and rescale intensities."""
        # ------------------------
        # casting to float32
        vols = [sitk.Cast(im, sitk.sitkFloat32) for im in vols]

        # ------------------------
        # create resampling operator based on ref vol
        # define the reference volume
        vol_ref = vols[reference_inx]
        other_inx = list(set(range(0, len(vols))) - set([reference_inx]))

        resample = create_resample(vol_ref, interpolation, size=vol_ref.GetSize(), spacing=vol_ref.GetSpacing())

        vols_res = []
        for i, vol in enumerate(vols):
            if i in other_inx:
                vol_res = resample.Execute(vol)
            else:
                vol_res = vol_ref
            vols_res.append(vol_res)

        # ------------------------
        # stack sequences in 4D sitk
        vol_arr = [sitk.GetArrayFromImage(vol) for vol in (vols_res)]
        vol_final = np.stack(vol_arr, axis=-1)
        vol_final_sitk = sitk.GetImageFromArray(vol_final, isVector=True)
        vol_final_sitk.CopyInformation(vol_ref)

        # ------------------------
        # rescale intensity
        vol_backup = sitk.Image(vol_final_sitk)
        vol_array = sitk.GetArrayFromImage(vol_final_sitk)
        if len(vol_array.shape) < 4:
            vol_array = vol_array[:, :, :, np.newaxis]
        vol_array = self.apply_rescaling(vol_array)
        vol_final = sitk.GetImageFromArray(vol_array, isVector=True)
        vol_final.CopyInformation(vol_backup)
        vol_final = sitk.Image(vol_final)

        return vol_final

    # ========================================================================
    def crop_lesion_vol(self, vol, position, ref, size=(160, 160, 32), spacing=(1, 1, 3), center_slice=None):
        """Crop a `size`/`spacing` patch centered on a lesion position.

        position: (x, y, z) lesion coordinates in `ref` index space.
        Returns (cropped_image, cropped_mask) where the mask marks a small
        dilated neighborhood around the lesion.
        NOTE(review): `center_slice=None` (the default) crashes below when
        computing the z offset — confirm callers always pass it.
        """

        def get_lesion_mask(position, ref):
            mask = np.zeros_like(sitk.GetArrayViewFromImage(ref), dtype=np.uint8)

            # Fixed: np.int was removed in NumPy >= 1.24; use the builtin int.
            coords = np.round(position[::-1]).astype(int)
            mask[coords[0], coords[1], coords[2]] = 1
            mask = binary_dilation(mask, np.ones((3, 5, 5))) + 0

            mask_sitk = sitk.GetImageFromArray(mask)
            mask_sitk.CopyInformation(ref)

            return mask_sitk

        mask = get_lesion_mask(position, ref)

        # Work in a normalized coordinate frame before translating.
        vol.SetOrigin((0,) * 3)
        mask.SetOrigin((0,) * 3)
        vol.SetDirection(np.eye(3).flatten())
        mask.SetDirection(np.eye(3).flatten())

        ma_centroid = mask > 0.5
        label_analysis_filer = sitk.LabelShapeStatisticsImageFilter()
        label_analysis_filer.Execute(ma_centroid)
        centroid = label_analysis_filer.GetCentroid(1)
        offset_correction = np.array(size) * np.array(spacing) / 2
        corrected_centroid = np.array(centroid)
        corrected_centroid[2] = center_slice * np.array(spacing[2])
        offset = corrected_centroid - np.array(offset_correction)

        translation = sitk.TranslationTransform(3, offset)
        img, mask = self.apply_resampling(vol, mask, spacing=spacing, size=size, transform=translation)

        return img, mask
if __name__ == "__main__":
path_to_db = '/gpfs/haifa/projects/m/msieve_dev3/usr/Tal/my_research/virtual_biopsy/prostate/experiments/V1/'
dataset = 'prostate_x'
if dataset=='tcia':
# for TCIA-Prostate-MRI-US-Biopsy
prostate_data_path = '/projects/msieve/MedicalSieve/PatientData/TCIA-Prostate-MRI-US-Biopsy/manifest-1599764098812/'
masks_data_path = prostate_data_path + '/STLs/'
sample = ('18042021', 'train', 'Prostate-MRI-US-Biopsy-0007', 'pred')
a = PROSTATEX3DVolume(path_to_db,prostate_data_path,masks_data_path,dataset,(16, 120, 120))
samples = a.__call__(sample)
a = 1
elif dataset=='prostate_x':
        pass  # NOTE(review): prostate_x branch body lost in extraction — restore from upstream
# --- next file: covid19-api (repo: ballinc/covid19-api) ---
from decouple import config
from flask import Flask, jsonify, url_for
from flask_caching import Cache
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_restplus import Api, Resource
import src.utils as util
from src.errors import CountryNotFound, RegionNotFound
# API version prefix and the base path the app is mounted under (from env).
API_VERSION = "v1"
BASE_PATH = config("BASE_PATH")
# Human-readable route catalogue shown on the home page.
# NOTE(review): this list names the parameter <region_name> while the actual
# @api.route decorator below uses <region> — confirm which is intended.
ROUTES = [
    f"{BASE_PATH}/doc/",
    f"{BASE_PATH}/api/{API_VERSION}/all/",
    f"{BASE_PATH}/api/{API_VERSION}/all/<country>",
    f"{BASE_PATH}/api/{API_VERSION}/history/<data_type>",
    f"{BASE_PATH}/api/{API_VERSION}/history/<data_type>/total",
    f"{BASE_PATH}/api/{API_VERSION}/history/<data_type>/<country>",
    f"{BASE_PATH}/api/{API_VERSION}/history/<data_type>/<country>/regions",
    f"{BASE_PATH}/api/{API_VERSION}/history/<data_type>/<country>/<region_name>",
    f"{BASE_PATH}/api/{API_VERSION}/proportion/<data_type>",
    f"{BASE_PATH}/api/{API_VERSION}/proportion/<data_type>/total",
    f"{BASE_PATH}/api/{API_VERSION}/proportion/<data_type>/<country>",
    f"{BASE_PATH}/api/{API_VERSION}/daily/<data_type>",
    f"{BASE_PATH}/api/{API_VERSION}/daily/<data_type>/total",
    f"{BASE_PATH}/api/{API_VERSION}/daily/<data_type>/<country>",
    f"{BASE_PATH}/api/{API_VERSION}/proportion-daily/<data_type>",
    f"{BASE_PATH}/api/{API_VERSION}/proportion-daily/<data_type>/total",
    f"{BASE_PATH}/api/{API_VERSION}/proportion-daily/<data_type>/<country>",
]
# Upstream data sources credited on the home page.
SOURCES = [
    "https://github.com/CSSEGISandData/COVID-19",
    "https://www.worldometers.info/coronavirus/"
]
# Payload served at the home page route.
route_homepage = {
    "documentation": f"{BASE_PATH}/doc",
    "routes": ROUTES,
    "<data_type>": "confirmed | recovered | deaths",
    "Api version": API_VERSION,
    "discord": "https://discord.gg/wTxbQYb",
    "sources": SOURCES,
    "github": "https://github.com/takitsu21/covid19-api"
}
# Swagger response-code descriptions reused by every endpoint below.
responses = {
    200: 'Success',
    401: 'Unauthorized',
    429: 'Rate limited',
    404: 'Not found',
    500: 'Internal server error'
}
app = Flask(__name__)
# Treat /path and /path/ as the same route.
app.url_map.strict_slashes = False
# Per-client rate limiting keyed on the remote address; owners are exempt.
limiter = Limiter(
    app,
    get_remote_address,
    default_limits=["3/second", "60/minute", "2000/hour"],
    default_limits_exempt_when=util.no_limit_owner
)
# In-process cache used to memoize the data-reading helpers below.
cache = Cache(
    app,
    config={
        "CACHE_TYPE": "simple",
        "CACHE_DEFAULT_TIMEOUT": 15 * 60  # 15 minutes caching
    }
)
class SSLApiDoc(Api):
    """flask_restplus Api whose swagger specs URL honours HTTPS in production."""
    @property
    def specs_url(self):
        """Monkey patch for HTTPS: keep plain http on the local dev port 5000."""
        if '5000' in self.base_url:
            scheme = 'http'
        else:
            scheme = 'https'
        return url_for(self.endpoint('specs'), _external=True, _scheme=scheme)
# Swagger-documented API root; interactive docs served at /doc/.
api = SSLApiDoc(app, doc='/doc/', version='1.0', title='COVID19 API',
                description="Coronavirus COVID 19 API")
@cache.memoize()
def all_data():
    """Return the full 'data.json' snapshot, or a JSON error response."""
    try:
        payload = util.read_json("data.json")
        return jsonify(payload)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def all_country(country):
    """Return the data.json entry matching *country* (name, ISO2 or ISO3)."""
    try:
        regions = util.read_json("data.json")
        match = next(
            (entry for entry in regions
             if util.pattern_match(country, entry["country"],
                                   entry["iso2"], entry["iso3"])),
            None)
        if match is None:
            raise CountryNotFound("This region cannot be found. Please try again.")
        return jsonify(match)
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def history(data_type):
    """Return the whole per-country cumulative history for *data_type*."""
    try:
        payload = util.read_json(f"csv_{data_type}.json")
        return jsonify(payload)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def history_country(data_type, country):
    """Return one country's cumulative history, tagged with its name."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for name, entry in data.items():
            if not util.pattern_match(country, name,
                                      entry["iso2"], entry["iso3"]):
                continue
            entry["name"] = name
            return jsonify(entry)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def history_region(data_type, country, region_name):
    """Return the history of one sub-national region of *country*."""
    try:
        # The US has its own region file; everyone else shares one.
        if country.lower() in ("us", "united states", "usa"):
            filename = f"csv_{data_type}_us_region.json"
        else:
            filename = f"csv_{data_type}_region.json"
        data = util.read_json(filename)
        wanted = region_name.lower()
        for name, entry in data.items():
            if not util.pattern_match(country, name,
                                      entry["iso2"], entry["iso3"]):
                continue
            for region, region_data in entry["regions"].items():
                if region.lower() == wanted:
                    return jsonify(region_data)
        raise RegionNotFound("This region cannot be found. Please try again.")
    except RegionNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def history_region_all(data_type, country):
    """Return every sub-national region history of *country*."""
    try:
        # The US has its own region file; everyone else shares one.
        if country.lower() in ("us", "united states", "usa"):
            filename = f"csv_{data_type}_us_region.json"
        else:
            filename = f"csv_{data_type}_region.json"
        data = util.read_json(filename)
        for name, entry in data.items():
            if util.pattern_match(country, name,
                                  entry["iso2"], entry["iso3"]):
                return jsonify(entry["regions"])
        raise CountryNotFound("This country cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def history_region_world(data_type):
    """Aggregate every country's history into a single world-wide total."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        totals = {}
        for entry in data.values():
            for day, count in entry["history"].items():
                totals[day] = totals.get(day, 0) + int(count)
        return jsonify({"history": totals})
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion(data_type):
    """Rewrite each country's history as a percentage of its population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            entry = data[region]
            if entry["iso3"] == "":
                # TODO: Note, some regions do not have iso2/3 codes....
                data[region] = {"proportion" : "This region doesn't work with this function atm"}
                continue
            if entry["iso3"] not in util.populations:
                # Population table may be stale; reload it from the CSV.
                util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
            pop = float(util.populations[entry["iso3"]])
            data[region] = {
                "proportion": {d: f"{round(h / pop * 100, 5):.5f}"
                               for d, h in entry["history"].items()},
                "iso2": entry["iso2"],
                "iso3": entry["iso3"],
            }
        return jsonify(data)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion_country(data_type, country):
    """One country's history as a percentage of its population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for name in list(data.keys()):
            entry = data[name]
            if not util.pattern_match(country, name,
                                      entry["iso2"], entry["iso3"]):
                continue
            if entry["iso3"] not in util.populations:
                # Population table may be stale; reload it from the CSV.
                util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
            pop = float(util.populations[entry["iso3"]])
            ret = {"proportion": {d: f"{round(h / pop * 100, 5):.5f}"
                                  for d, h in entry["history"].items()}}
            ret["iso2"] = entry["iso2"]
            ret["iso3"] = entry["iso3"]
            ret["name"] = name
            return jsonify(ret)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion_region_world(data_type):
    """World totals expressed as a percentage of the world population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        totals = {}
        for entry in data.values():
            for day, count in entry["history"].items():
                totals[day] = totals.get(day, 0) + int(count)
        world_pop = int(util.WORLD_POPULATION)
        proportions = {day: f"{round(total / world_pop * 100, 5):.5f}"
                       for day, total in totals.items()}
        return jsonify({"proportion": proportions})
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def daily(data_type):
    """Convert each country's cumulative history into day-over-day deltas."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            entry = data[region]
            deltas = {}
            previous = 0
            for day, cumulative in entry["history"].items():
                deltas[day] = cumulative - previous
                previous = int(cumulative)
            data[region] = {"daily": deltas,
                            "iso2": entry["iso2"],
                            "iso3": entry["iso3"]}
        return jsonify(data)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def daily_region_world(data_type):
    """World-wide day-over-day deltas for *data_type*."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        # First sum the cumulative counts across all countries per day...
        totals = {}
        for entry in data.values():
            for day, count in entry["history"].items():
                totals[day] = totals.get(day, 0) + int(count)
        # ...then difference consecutive days.
        deltas = {}
        previous = 0
        for day, cumulative in totals.items():
            deltas[day] = cumulative - previous
            previous = int(cumulative)
        return jsonify({"daily": deltas})
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def daily_country(data_type, country):
    """Day-over-day deltas for one country (name, ISO2 or ISO3)."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        ret = {"daily" : {}}
        for region in list(data.keys()):
            if util.pattern_match(
                    country,
                    region,
                    data[region]["iso2"],
                    data[region]["iso3"]):
                prev = 0
                for d, h in data[region]["history"].items():
                    ret["daily"][d] = h - prev
                    # Normalise like daily()/daily_region_world() do (was
                    # `prev = h`), so a string count cannot poison the
                    # running difference.
                    prev = int(h)
                ret["iso2"] = data[region]["iso2"]
                ret["iso3"] = data[region]["iso3"]
                ret["name"] = region
                return jsonify(ret)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion_daily(data_type):
    """Daily deltas per country expressed as a percentage of its population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        for region in list(data.keys()):
            entry = data[region]
            if entry["iso3"] == "":
                # TODO: Note, some regions do not have iso2/3 codes....
                data[region] = {"proportion-daily" : "This region doesn't work with this function atm"}
                continue
            if entry["iso3"] not in util.populations:
                # Population table may be stale; reload it from the CSV.
                util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
            pop = float(util.populations[entry["iso3"]])
            series = {}
            previous = 0
            for day, count in entry["history"].items():
                series[day] = f"{round((count - previous) / pop * 100, 10):.10f}"
                previous = int(count)
            data[region] = {"proportion-daily": series,
                            "iso2": entry["iso2"],
                            "iso3": entry["iso3"]}
        return jsonify(data)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion_daily_region_world(data_type):
    """World daily deltas as a percentage of the world population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        # Sum cumulative counts per day across all countries.
        totals = {}
        for entry in data.values():
            for day, count in entry["history"].items():
                totals[day] = totals.get(day, 0) + int(count)
        # Difference consecutive days and scale by world population.
        world_pop = int(util.WORLD_POPULATION)
        series = {}
        previous = 0
        for day, cumulative in totals.items():
            series[day] = f"{round((cumulative - previous) / world_pop * 100, 10):.10f}"
            previous = int(cumulative)
        return jsonify({"proportion-daily": series})
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@cache.memoize()
def proportion_daily_country(data_type, country):
    """One country's daily deltas as a percentage of its population."""
    try:
        data = util.read_json(f"csv_{data_type}.json")
        ret = {"proportion-daily" : {}}
        for region in list(data.keys()):
            if util.pattern_match(
                    country,
                    region,
                    data[region]["iso2"],
                    data[region]["iso3"]):
                if data[region]["iso3"] in util.populations:
                    pop = float(util.populations[data[region]["iso3"]])
                else:
                    # Population table may be stale; reload it from the CSV.
                    util.populations = util.csv_to_dict(util.CSV_POPULATIONS)
                    pop = float(util.populations[data[region]["iso3"]])
                prev = 0
                for d, h in data[region]["history"].items():
                    ret["proportion-daily"][d] = f"{round((h - prev) / pop * 100, 10):.10f}"
                    # Normalise like proportion_daily() does (was `prev = h`),
                    # keeping the sibling endpoints consistent.
                    prev = int(h)
                ret["iso2"] = data[region]["iso2"]
                ret["iso3"] = data[region]["iso3"]
                ret["name"] = region
                return jsonify(ret)
        raise CountryNotFound("This region cannot be found. Please try again.")
    except CountryNotFound as e:
        return util.response_error(message=f"{type(e).__name__} : {e}", status=404)
    except Exception as e:
        return util.response_error(message=f"{type(e).__name__} : {e}")
@api.route(f"/api/{API_VERSION}/all/")
class All(Resource):
    """GET the latest snapshot for every tracked country."""
    @api.doc(responses=responses)
    def get(self):
        return all_data()
@api.route(f"/api/{API_VERSION}/all/<country>/")
class AllSelector(Resource):
    """GET the latest snapshot for one country (full name, ISO2 or ISO3)."""
    @api.doc(responses=responses)
    def get(self, country):
        return all_country(country)
@api.route(f"/api/{API_VERSION}/history/<data_type>/")
class HistoryDataType(Resource):
    """GET the full cumulative history for one data type."""
    @api.doc(
        responses=responses)
    def get(self, data_type: str):
        # data_type is a URL path segment, hence str; the previous `int`
        # annotation was wrong and inconsistent with the sibling endpoints.
        return history(data_type)
@api.route(f"/api/{API_VERSION}/history/<data_type>/<country>/")
class HistoryDataTypeCountry(Resource):
    """GET one country's cumulative history for one data type."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`", "country": "Full name or ISO-3166-1"})
    def get(self, data_type: str, country: str):
        return history_country(data_type, country)
@api.route(f"/api/{API_VERSION}/history/<data_type>/<country>/<region>")
class HistoryDataTypeRegion(Resource):
    """GET the history of one sub-national region of a country."""
    # NOTE(review): the ROUTES catalogue above calls this parameter
    # <region_name> — confirm which name is intended.
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`", "country": "Full name or ISO-3166-1", "region": "Region name"})
    def get(self, data_type: str, country: str, region: str):
        return history_region(data_type, country, region)
@api.route(f"/api/{API_VERSION}/history/<data_type>/<country>/regions")
class HistoryDataTypeRegions(Resource):
    """GET the histories of every sub-national region of a country."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`", "country": "Full name or ISO-3166-1"})
    def get(self, data_type: str, country: str):
        return history_region_all(data_type, country)
@api.route(f"/api/{API_VERSION}/history/<data_type>/total")
class HistoryDataTypeTotal(Resource):
    """GET the world-wide cumulative history for one data type."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`"})
    def get(self, data_type: str):
        return history_region_world(data_type)
@api.route(f"/api/{API_VERSION}/proportion/<data_type>/")
class ProportionDataType(Resource):
    """GET every country's history as a percentage of its population."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`"})
    def get(self, data_type: str):
        return proportion(data_type)
@api.route(f"/api/{API_VERSION}/proportion/<data_type>/total")
class ProportionDataTypeTotal(Resource):
    """GET world history as a percentage of the world population."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`"},
             description="Returns the percentage of the world's population to be affected by COVID-19")
    def get(self, data_type: str):
        return proportion_region_world(data_type)
@api.route(f"/api/{API_VERSION}/proportion/<data_type>/<country>/")
class ProportionDataTypeCountry(Resource):
    """GET one country's history as a percentage of its population."""
    @api.doc(responses=responses,
             params={"data_type": "Input accepted : `confirmed` | `recovered` | `deaths`", "country": "Full name or ISO-3166-1"})
    def get(self, data_type: str, country: str):
        return proportion_country(data_type, country)
@api.route(f"/api/{API_VERSION}/daily/<data_type>/")
class DailyDataType(Resource):
@api.doc(responses=responses,
params={"data_type": "Input | |
( self.tasks[self.iter][0] + key2str[e.key()], self.tasks[self.iter][1], self.tasks[self.iter][2], self.tasks[self.iter][3])
self.visualizer.setStringToPrint(self.tasks[self.iter][0])
elif((e.key() == QtCore.Qt.Key_Enter) or (e.key() == QtCore.Qt.Key_Return)):
# Validate and Execute
if(self.validateEquation() == True):
self.executeOnSuccess()
else:
self.visualizer.say("Wrong!")
self.errorOnPresentTask = True
def executeOnSuccess(self):
# If error was made in this answer (at some point)
# then no medal is given
if self.errorOnPresentTask == True:
self.numMistakes += 1
# Calculate number of group medals and single medals
num_medals = self.iter + 1 - self.numMistakes;
num_groups_medals = num_medals/5
num_medals = num_medals - num_groups_medals*5
x = self.geometry().x()
y = self.geometry().y()
width = self.geometry().width()
height = self.geometry().height()
sizeOfMedal = height/5
# Put medals starting from bottom left
group_idx = num_groups_medals
idx = 0
self.hideImages(self.tempMedals)
self.tempMedals = []
while num_groups_medals > 0:
self.tempMedals.append(QtSvg.QSvgWidget(self.resourcesPath + "/medals.svg", self))
self.tempMedals[-1].setGeometry(x+idx*sizeOfMedal,y + height - sizeOfMedal,sizeOfMedal,sizeOfMedal)
self.tempMedals[-1].show()
idx = idx + 1
num_groups_medals = num_groups_medals -1
while num_medals > 0:
self.tempMedals.append(QtSvg.QSvgWidget(self.resourcesPath + "/medal.svg", self))
self.tempMedals[-1].setGeometry(x+idx*sizeOfMedal,y + height - sizeOfMedal,sizeOfMedal,sizeOfMedal)
self.tempMedals[-1].show()
idx = idx + 1
num_medals = num_medals - 1
self.update()
# If there was an error in present puzzle then be less optimistic on
# in congratualtions
if self.errorOnPresentTask == True:
congrats = ["OK!","Finally!","Approved!"]
else:
congrats = ["Correct!","Excellent!","Great!","Very good!","Amazing!","Perfect!","Well done!","Awesome!"]
if self.tasks[self.iter][3] == "maze":
self.tasks[self.iter][4].say(random.choice(congrats))
else:
self.visualizer.say(random.choice(congrats))
self.visualizer.cleanup()
self.tasks[self.iter] = ( "", self.tasks[self.iter][1], self.tasks[self.iter][2], self.tasks[self.iter][3])
if self.tasks[self.iter][3] == "lang":
time.sleep(1)
self.visualizer.say("This is " + self.tasks[self.iter][2])
time.sleep(1)
if self.tasks[self.iter][3] == "clock":
time.sleep(1)
self.visualizer.say("It is " + str(self.tasks[self.iter][1]) + " o'clock")
time.sleep(1)
self.iter+=1
self.description = "" # Reset description of puzzle
self.hideImages(self.tempImages)
self.visualized = False
self.errorOnPresentTask = False
if self.iter == len(self.tasks):
# Calculate time to have computer acrivities for
timeToWatch = 20
if self.iter > self.numMistakes:
timeToWatch += (self.iter - self.numMistakes)
if self.choice_menu:
self.prepareChoice(timeToWatch)
else:
subprocess.call(["sudo","shutdown","-c"])
subprocess.call(["sudo","shutdown","-h","+"+str(timeToWatch)])
exit(111)
return
def hideImages(self,widgets):
for widget in widgets:
widget.setHidden(True)
def prepareChoice(self, timeToWatch):
self.hideImages(self.tempMedals)
choices = {"http://www.netflix.com" : "/netflix.svg", "http://youtube.com" : "/youtube.svg"}
# Extend chocies with user defined content
for entry in self.content:
choices[entry] = self.content[entry]
width = self.geometry().width()/len(choices)/2
height = self.geometry().height()/len(choices)/2
self.choice = self.Choice(choices,
self.resourcesPath,
self,
self.geometry().width()/5,
self.geometry().height()/5,
width,
height,
self.args.dry_run,
timeToWatch)
def validateEquation(self):
print("Validate!!")
# Get result typed and convert it to number
if(len(self.tasks[self.iter][0]) == self.lenBaseText[self.iter]) and self.tasks[self.iter][3] != "maze":
return False
# For maze we do not have a string values
if self.tasks[self.iter][3] == "+":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1] + self.tasks[self.iter][2]
elif self.tasks[self.iter][3] == "-":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1] - self.tasks[self.iter][2]
elif self.tasks[self.iter][3] == "*":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1] * self.tasks[self.iter][2]
elif self.tasks[self.iter][3] == "/":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1] / self.tasks[self.iter][2]
elif self.tasks[self.iter][3] == "?":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "lang":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "dialogues":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "typing":
typed_result = (self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "text":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "snail":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "buying":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "arrangement":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][2]
elif self.tasks[self.iter][3] == "clock":
typed_result = int(self.tasks[self.iter][0][self.lenBaseText[self.iter]:])
computed_result = self.tasks[self.iter][1]
elif self.tasks[self.iter][3] == "maze":
# If coords of princess and knight are the same
# then puzzle of maze is solved eg. knight met princess
computed_result = self.tasks[self.iter][4].princessPosY*self.tasks[self.iter][4].width + self.tasks[self.iter][4].princessPosX
typed_result = self.tasks[self.iter][4].knightPosY*self.tasks[self.iter][4].width + self.tasks[self.iter][4].knightPosX
elif self.tasks[self.iter][3] == "memory":
computed_result = 0
typed_result = 1
# compare typed result with computed result
if(typed_result == computed_result):
return True
else:
return False
def parseDialogueFile(self, dialoguePath):
questions = []
answers = []
with open(dialoguePath) as f:
i = 0
for line in f:
if not line.strip(): continue
if i == 0:
questions.append(line)
else:
answers.append(line)
i = 1 - i
index = random.randint(0,len(questions)-1)
question = questions[index]
answer = answers[index]
question = ' '.join(question.split()[1:])
answer = ' '.join(answer.split()[1:])
return question, answer
def prepareDialoguesData(self):
""" Load first dialog randomly and store question and answer.
Then load two random answers from other modules"""
dialoguesNames = listdir(self.dialogues)
# Remove README from dialogues list
dialoguesNames.remove("README")
# Pick dialogue from list of dialogues
dialogue = random.choice(dialoguesNames)
dialoguesNames.remove(dialogue)
# Parse dialogue
question, answer = self.parseDialogueFile(self.dialogues+"/"+dialogue)
description = dialogue.strip(".txt").replace("_"," ")+"\n. "
# Pick two more answers from other dialogues
dialogue = random.choice(dialoguesNames)
dialoguesNames.remove(dialogue)
badquestion, badanswer1 = self.parseDialogueFile(self.dialogues+"/"+dialogue)
dialogue = random.choice(dialoguesNames)
dialoguesNames.remove(dialogue)
badquestion, badanswer2 = self.parseDialogueFile(self.dialogues+"/"+dialogue)
return description, question, answer, badanswer1, badanswer2
def prepareTestData(self, imagesDirPath):
"""# Load images randomly
os listdir , choose randomyl 3 files
diffrent ones, then one should be read eg. image loaded
and printed, other just need as invalid answer
proper answer randomly to be set and storedc"""
# TODO: make sure it is only files not directories
imagesNames = listdir(imagesDirPath)
# Get Randomly imagename to be proper answer and its picture
correctOneName = random.choice(imagesNames)
# Here is name of animal that corresponds to picture
correctAnimalName = correctOneName.replace("-wt.gif","").replace("-vt.gif","").replace("-vb.gif","").replace("-wb.gif","").replace(".gif","")
incorrectAnimalName1, incorrectAnimalName2 = self.getIncorrectAnswers(imagesNames, correctAnimalName)
correctPicFileName = imagesDirPath + "/" + correctOneName
return correctPicFileName, correctAnimalName, incorrectAnimalName1,incorrectAnimalName2
def prepareTypingTestData(self, imagesDirPath):
"""# Load image randomly
os listdir , choose randomyl 1 file
proper answer (expected typing) is returned"""
# TODO: make sure it is only files not directories
imagesNames = listdir(imagesDirPath)
# Get Randomly imagename to be proper answer and its picture
correctOneName = random.choice(imagesNames)
# Here is name of animal that corresponds to picture
correctAnimalName = correctOneName.replace("-wt.gif","").replace("-vt.gif","").replace("-vb.gif","").replace("-wb.gif","").replace(".gif","")
correctAnimalName = correctAnimalName.replace("-"," ")
correctPicFileName = imagesDirPath + "/" + correctOneName
return correctPicFileName, correctAnimalName
def computeAnswerAndTotal(self, param_pair, maxValue):
# Kasia_items * coeff[0] + coeff[1] + Kasia_items < maxValue <=> (maxValue - coeff[1])/(1 + coeff[0]) >= Kasia_items
# max(2,2 - param_pair[1]) as 2 - 3 may give -1 for stephane items
kasia_items = random.randint(max(2,2 - param_pair[1]), int((maxValue - param_pair[1])/(1 + param_pair[0])))
stephane_items = kasia_items*param_pair[0] + param_pair[1]
if stephane_items != int(stephane_items):
stephane_items = int(stephane_items)
kasia_items = (stephane_items - param_pair[1])/param_pair[0]
sum_items = int(kasia_items + kasia_items*param_pair[0] + param_pair[1])
return kasia_items, sum_items
def prepareSnailPuzzle(self, maxValue):
runners = ["snail-1.svg","snake-2.svg","cat-3.svg"]
participant = random.choice(runners)
speed = int(participant.replace(".svg","")[participant.find('-')+1:])
k = random.randint(speed,int(maxValue/speed))
return participant, k, k*speed
def prepareMemoryPuzzle(self, imagesDirPath, numImages):
imagesNames = listdir(imagesDirPath)
# Pick minimum from images available and images requested
numImages = min(len(imagesNames) , numImages)
unique = random.sample(imagesNames ,numImages)
return unique + unique
def prepareTextPuzzle(self, maxValue):
"""Generate Text puzzle and return in a form of: relation(text), correct answer, total number of items"""
# TODO: Add unit test , mandatory
# Stephany has...
relations = {"three more than": (1,3) ,"two more than": (1,2) ,"one more than" : (1,1), "one less than" : (1,-1), "two less than" : (1,-2), "three less than" : (1,-3), "twice as much as" : (2,0), "half of what" : (0.5, 0)}
relation = random.choice(relations.keys())
kasia_items, sum_items = self.computeAnswerAndTotal(relations[relation],maxValue)
return relation, kasia_items, sum_items
# TODO: Add unit test
def generateCoins(self, total):
""" Based on provided total money generate coins that add up to given total"""
available_coins = [ (0,10) , (0,20), (0,50), (1,0), (2,0), (5,0)]
pocket_coins = { (0,10) : 0 , (0,20) : 0, (0,50) : 0, (1,0) : 0 , (2,0) : 0, (5,0) : 0}
value = 0
while value < total:
# Choose coin by random
(coin_zlotys, coin_groszys) = random.choice(available_coins)
#if coins alothogether do not exceed total sum of pocket money then
potential_value = value + coin_zlotys*100 + coin_groszys
if potential_value > total:
continue
value += coin_zlotys*100 + coin_groszys
# add coin to pocket money and update sum of coins accordingly
pocket_coins[(coin_zlotys,coin_groszys)] += 1
return pocket_coins
def prepareBuyingPuzzle(self):
# TODO : Add more items
items = ["ice_cream-0-50.svg","bear-2-0.svg","lollipop-0-20.svg","cookie-0-30.svg"]
item_to_buy = random.choice(items)
# Get base name of item and its price
data = item_to_buy.split('-')
(zlotys,groszys) = (int(data[1]) , int(data[2].replace(".svg","")))
# Generate pocket money (multiplication of 10 groszys)
item_value = zlotys*100 + groszys
pocket_money = (random.randint(int(item_value*0.75),4*(item_value)))/10*10;
# Compute potential number of items to buy
answer = int(pocket_money / item_value)
# Generate coins
coins = self.generateCoins(pocket_money)
return item_to_buy, answer, coins,
def prepareClockTestData(self):
""" Load a | |
db_column='to_table', related_name = 'to_table')
to_column = models.CharField(max_length=50)
value_vocabulary_id = models.CharField(max_length=50, blank=True, null=True)
value_concept_code = models.CharField(max_length=50, blank=True, null=True)
addl_column = models.CharField(max_length=50, blank=True, null=True)
addl_value = models.CharField(max_length=50, blank=True, null=True)
from_date_column = models.CharField(max_length=50, blank=True, null=True)
where_clause = models.CharField(max_length=256)
comment = models.CharField(max_length=256, blank=True, null=True)
class Meta:
managed = False
db_table = 'events_mapping'
unique_together = (('study', 'from_table', 'from_column', 'to_table', 'to_column', 'where_clause'),)
class ExtractStudy(models.Model):
    """Unmanaged ORM mapping of the legacy 'extract_study' table."""
    extract_study_id = models.IntegerField(primary_key=True)
    # Kept as a plain integer (not a ForeignKey) in the legacy schema.
    study_id = models.IntegerField(blank=True, null=True)
    name = models.CharField(max_length=100, blank=True, null=True)
    comment = models.CharField(max_length=1000, blank=True, null=True)
    class Meta:
        # managed=False: Django never creates or migrates this table.
        managed = False
        db_table = 'extract_study'
#class FactRelationship(models.Model):
# domain_concept_id_1 = models.ForeignKey(Concept, models.DO_NOTHING, db_column='domain_concept_id_1')
# fact_id_1 = models.IntegerField()
# domain_concept_id_2 = models.ForeignKey(Concept, models.DO_NOTHING, db_column='domain_concept_id_2')
# fact_id_2 = models.IntegerField()
# relationship_concept = models.ForeignKey(Concept, models.DO_NOTHING)
#
# class Meta:
# managed = False
# db_table = 'fact_relationship'
class Location(models.Model):
    """Unmanaged mapping of the 'location' table (addresses).

    NOTE(review): field set matches the OMOP CDM location table — confirm.
    """
    location_id = models.IntegerField(primary_key=True)
    address_1 = models.CharField(max_length=50, blank=True, null=True)
    address_2 = models.CharField(max_length=50, blank=True, null=True)
    city = models.CharField(max_length=50, blank=True, null=True)
    state = models.CharField(max_length=2, blank=True, null=True)
    zip = models.CharField(max_length=9, blank=True, null=True)
    county = models.CharField(max_length=20, blank=True, null=True)
    # Verbatim location value carried over from the source data.
    location_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        # managed=False: Django never creates or migrates this table.
        managed = False
        db_table = 'location'
class Measurement(models.Model):
    """Unmanaged model over the OMOP ``measurement`` table.

    Multiple ForeignKeys target ``Concept``, hence the explicit
    ``related_name`` on each to keep reverse accessors distinct.
    """
    measurement_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    measurement_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'measurement_concept')
    measurement_date = models.DateField()
    measurement_datetime = models.DateTimeField(blank=True, null=True)
    measurement_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'measurement_type_concept')
    operator_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'operator_concept')
    # max_digits/decimal_places=65535 is inspectdb's rendering of an
    # unconstrained numeric column -- TODO confirm against the DB schema.
    value_as_number = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    value_as_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_value_as_concept')
    unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_unit_concept')
    range_low = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    range_high = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)  # FK disabled (Provider model commented out); raw id kept
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    measurement_source_value = models.CharField(max_length=50, blank=True, null=True)
    measurement_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'measurement_source_concept')
    unit_source_value = models.CharField(max_length=50, blank=True, null=True)
    value_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'measurement'
class Note(models.Model):
    """Unmanaged model over the OMOP ``note`` table (clinical note text and metadata)."""
    note_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    note_date = models.DateField()
    note_datetime = models.DateTimeField(blank=True, null=True)
    note_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'note_type_concept')
    note_class_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'note_class_concept')
    note_title = models.CharField(max_length=250, blank=True, null=True)
    note_text = models.TextField()
    encoding_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'encoding_concept')
    language_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'language_concept')
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)  # FK disabled (Provider model commented out); raw id kept
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    note_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'note'
#class NoteNlp(models.Model):
# note_nlp_id = models.BigIntegerField(primary_key=True)
# note = models.ForeignKey(Note, models.DO_NOTHING)
# section_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# snippet = models.CharField(max_length=250, blank=True, null=True)
# offset = models.CharField(max_length=250, blank=True, null=True)
# lexical_variant = models.CharField(max_length=250)
# note_nlp_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# note_nlp_source_concept_id = models.IntegerField(blank=True, null=True)
# nlp_system = models.CharField(max_length=250, blank=True, null=True)
# nlp_date = models.DateField()
# nlp_datetime = models.DateTimeField(blank=True, null=True)
# term_exists = models.CharField(max_length=1, blank=True, null=True)
# term_temporal = models.CharField(max_length=50, blank=True, null=True)
# term_modifiers = models.CharField(max_length=2000, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'note_nlp'
class Observation(models.Model):
    """Unmanaged model over the OMOP ``observation`` table.

    Value may arrive as a number, a string, or a concept, hence the three
    ``value_as_*`` columns.
    """
    observation_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey('Person', models.DO_NOTHING)
    observation_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'observation_concept')
    observation_date = models.DateField()
    observation_datetime = models.DateTimeField(blank=True, null=True)
    observation_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'observation_type_concept')
    # max_digits/decimal_places=65535: inspectdb's rendering of an unconstrained numeric -- TODO confirm
    value_as_number = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
    value_as_string = models.CharField(max_length=60, blank=True, null=True)
    value_as_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_value_as_concept')
    qualifier_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
    unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_unit_concept')
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)  # FK disabled (Provider model commented out); raw id kept
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    observation_source_value = models.CharField(max_length=50, blank=True, null=True)
    observation_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'observation_source_concept')
    unit_source_value = models.CharField(max_length=50, blank=True, null=True)
    qualifier_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'observation'
#class ObservationPeriod(models.Model):
# observation_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# observation_period_start_date = models.DateField()
# observation_period_start_datetime = models.DateTimeField()
# observation_period_end_date = models.DateField()
# observation_period_end_datetime = models.DateTimeField()
# period_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
#
# class Meta:
# managed = False
# db_table = 'observation_period'
class OhdsiCalculationArgument(models.Model):
    """Unmanaged model over ``ohdsi_calculation_argument``.

    The real table has a composite key (see ``unique_together``); Django has
    no composite primary keys, so ``study`` carries ``primary_key=True`` as an
    inspectdb approximation -- be careful with ORM updates on this model.
    """
    vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, related_name = 'vocabulary_concept')
    concept_code = models.CharField(max_length=100)
    study = models.ForeignKey('Study', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    argument_order = models.IntegerField(blank=True, null=True)
    argument_name = models.CharField(max_length=30, blank=True, null=True)
    value_field = models.CharField(max_length=20, blank=True, null=True)
    to_concept_code = models.CharField(max_length=100)
    to_vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING, related_name = 'to_vocabulary_concept')
    from_table = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'ohdsi_calculation_argument'
        unique_together = (('study', 'function_name', 'to_concept_code', 'to_vocabulary', 'vocabulary', 'concept_code'),)
class OhdsiCalculationFunction(models.Model):
    """Unmanaged model over ``ohdsi_calculation_function``.

    Composite real-world key (see ``unique_together``); ``study`` is marked
    ``primary_key=True`` only because Django lacks composite primary keys.
    """
    study = models.ForeignKey('Study', models.DO_NOTHING, primary_key=True)
    function_name = models.CharField(max_length=100)
    to_vocabulary = models.ForeignKey('VocabularyConcept', models.DO_NOTHING)
    to_concept_code = models.CharField(max_length=100)
    to_table = models.CharField(max_length=100, blank=True, null=True)
    to_column = models.CharField(max_length=100, blank=True, null=True)
    function_order = models.IntegerField(blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'ohdsi_calculation_function'
        unique_together = (('study', 'function_name', 'to_concept_code', 'to_vocabulary'),)
#class PayerPlanPeriod(models.Model):
# payer_plan_period_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey('Person', models.DO_NOTHING)
# payer_plan_period_start_date = models.DateField()
# payer_plan_period_end_date = models.DateField()
# payer_source_value = models.CharField(max_length=50, blank=True, null=True)
# plan_source_value = models.CharField(max_length=50, blank=True, null=True)
# family_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'payer_plan_period'
class Person(models.Model):
    """Unmanaged model over the OMOP ``person`` table (demographics)."""
    person_id = models.IntegerField(primary_key=True)
    gender_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'gender_concept')
    year_of_birth = models.IntegerField()
    month_of_birth = models.IntegerField(blank=True, null=True)
    day_of_birth = models.IntegerField(blank=True, null=True)
    birth_datetime = models.DateTimeField(blank=True, null=True)
    race_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'race_concept')
    ethnicity_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'ethnicity_concept')
    location = models.ForeignKey(Location, models.DO_NOTHING, blank=True, null=True)
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)  # FK disabled (Provider model commented out); raw id kept
    #care_site = models.ForeignKey(CareSite, models.DO_NOTHING, blank=True, null=True)
    care_site = models.IntegerField(blank=True, null=True)  # FK disabled (CareSite not modelled here); raw id kept
    person_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_value = models.CharField(max_length=50, blank=True, null=True)
    gender_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'gender_source_concept')
    race_source_value = models.CharField(max_length=50, blank=True, null=True)
    race_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'race_source_concept')
    ethnicity_source_value = models.CharField(max_length=50, blank=True, null=True)
    ethnicity_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'ethnicity_source_concept')
    class Meta:
        managed = False
        db_table = 'person'
class ProcedureOccurrence(models.Model):
    """Unmanaged model over the OMOP ``procedure_occurrence`` table."""
    procedure_occurrence_id = models.IntegerField(primary_key=True)
    person = models.ForeignKey(Person, models.DO_NOTHING)
    procedure_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'procedure_concept')
    procedure_date = models.DateField()
    procedure_datetime = models.DateTimeField()
    procedure_type_concept = models.ForeignKey(Concept, models.DO_NOTHING, related_name = 'procedure_type_concept')
    modifier_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'modifier_concept')
    quantity = models.IntegerField(blank=True, null=True)
    #provider = models.ForeignKey('Provider', models.DO_NOTHING, blank=True, null=True)
    provider = models.IntegerField(blank=True, null=True)  # FK disabled (Provider model commented out); raw id kept
    visit_occurrence = models.ForeignKey('VisitOccurrence', models.DO_NOTHING, blank=True, null=True)
    procedure_source_value = models.CharField(max_length=50, blank=True, null=True)
    procedure_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True, related_name = 'procedure_source_concept')
    qualifier_source_value = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'procedure_occurrence'
#class Provider(models.Model):
# provider_id = models.IntegerField(primary_key=True)
# provider_name = models.CharField(max_length=255, blank=True, null=True)
# npi = models.CharField(max_length=20, blank=True, null=True)
# dea = models.CharField(max_length=20, blank=True, null=True)
# specialty_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# care_site = models.ForeignKey(CareSite, models.DO_NOTHING, blank=True, null=True)
# year_of_birth = models.IntegerField(blank=True, null=True)
# gender_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# provider_source_value = models.CharField(max_length=50, blank=True, null=True)
# specialty_source_value = models.CharField(max_length=50, blank=True, null=True)
# specialty_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# gender_source_value = models.CharField(max_length=50, blank=True, null=True)
# gender_source_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'provider'
class Relationship(models.Model):
    """Unmanaged model over the OMOP ``relationship`` vocabulary table."""
    relationship_id = models.CharField(primary_key=True, max_length=20)
    relationship_name = models.CharField(max_length=255)
    is_hierarchical = models.CharField(max_length=1)  # flag stored as a 1-char code, not a boolean column
    defines_ancestry = models.CharField(max_length=1)
    reverse_relationship = models.ForeignKey('self', models.DO_NOTHING)  # self-FK to the inverse relationship row
    relationship_concept = models.ForeignKey(Concept, models.DO_NOTHING)
    class Meta:
        managed = False
        db_table = 'relationship'
class SourceToConceptMap(models.Model):
    """Unmanaged model over ``source_to_concept_map``.

    Composite real-world key (see ``unique_together``); ``source_vocabulary``
    carries ``primary_key=True`` only as Django's single-column approximation.
    """
    source_code = models.CharField(max_length=50)
    source_concept_id = models.IntegerField()
    source_vocabulary = models.ForeignKey('Vocabulary', models.DO_NOTHING, primary_key=True, related_name = 'source_vocabulary')
    source_code_description = models.CharField(max_length=255, blank=True, null=True)
    target_concept = models.ForeignKey(Concept, models.DO_NOTHING)
    target_vocabulary = models.ForeignKey('Vocabulary', models.DO_NOTHING, related_name = 'target_vocabulary')
    valid_start_date = models.DateField()
    valid_end_date = models.DateField()
    invalid_reason = models.CharField(max_length=1, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'source_to_concept_map'
        unique_together = (('source_vocabulary', 'target_concept', 'source_code', 'valid_end_date'),)
#
#class Specimen(models.Model):
# specimen_id = models.IntegerField(primary_key=True)
# person = models.ForeignKey(Person, models.DO_NOTHING)
# specimen_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# specimen_type_concept = models.ForeignKey(Concept, models.DO_NOTHING)
# specimen_date = models.DateField()
# specimen_datetime = models.DateTimeField(blank=True, null=True)
# quantity = models.DecimalField(max_digits=65535, decimal_places=65535, blank=True, null=True)
# unit_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# anatomic_site_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# disease_status_concept = models.ForeignKey(Concept, models.DO_NOTHING, blank=True, null=True)
# specimen_source_id = models.CharField(max_length=50, blank=True, null=True)
# specimen_source_value = models.CharField(max_length=50, blank=True, null=True)
# unit_source_value = models.CharField(max_length=50, blank=True, null=True)
# anatomic_site_source_value = models.CharField(max_length=50, blank=True, null=True)
# disease_status_source_value = models.CharField(max_length=50, blank=True, null=True)
#
# class Meta:
# managed = False
# db_table = 'specimen'
#
class Study(models.Model):
    """Unmanaged model over the ``study`` table: per-study ETL configuration.

    Holds id ranges, pipeline state flags (loaded/migrated/calculated) and the
    table/column/function names used to extract sex and race information.
    """
    study_id = models.IntegerField(primary_key=True)
    study_name = models.CharField(max_length=100, blank=True, null=True)
    person_id_range_start = models.IntegerField(blank=True, null=True)
    person_id_range_end = models.IntegerField(blank=True, null=True)
    observation_range_start = models.IntegerField(blank=True, null=True)
    observation_range_end = models.IntegerField(blank=True, null=True)
    loaded = models.BooleanField()  # pipeline state flags
    migrated = models.BooleanField()
    calculated = models.BooleanField()
    id_field_name = models.CharField(max_length=10, blank=True, null=True)
    person_id_prefix = models.CharField(max_length=10, blank=True, null=True)
    person_id_select = models.CharField(max_length=100, blank=True, null=True)
    person_details_select = models.CharField(max_length=200, blank=True, null=True)
    study_class = models.CharField(max_length=50, blank=True, null=True)
    sex_table_name = models.CharField(max_length=100, blank=True, null=True)
    sex_column_name = models.CharField(max_length=100, blank=True, null=True)
    sex_function_name = models.CharField(max_length=100, blank=True, null=True)
    race_table_name = models.CharField(max_length=100, blank=True, null=True)
    race_column_name = models.CharField(max_length=100, blank=True, null=True)
    race_function_name = models.CharField(max_length=100, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'study'
class TableColumn(models.Model):
table_column_id = models.IntegerField(primary_key=True)
study = models.ForeignKey(Study, models.DO_NOTHING, blank=True, null=True)
table_name = models.CharField(max_length=50)
column_name = | |
# account_invoice.py
# -*- coding: utf-8 -*-
from openerp import models,fields,api
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.exceptions import Warning
import time
from datetime import date, datetime
from ftplib import FTP
import os
import tempfile
from pyPdf import PdfFileWriter, PdfFileReader
from contextlib import closing
import logging
_logger = logging.getLogger(__name__)
# Default HTML e-mail body (French: "please find attached our invoice") used
# when sending invoices by mail.  The "[from]" token is presumably replaced
# with the sender's signature by the sending code -- TODO confirm (the
# substitution is not visible in this chunk).
modele_mail=u"""
<html>
<head>
<meta content="text/html; charset=UTF-8" http-equiv="Content-Type">
</head>
<body>
<font>Bonjour, </font>
<br><br>
<font> Veuillez trouver ci-joint notre facture.</font>
<br><br>
Cordialement <br><br>
[from]<br>
</body>
</html>
"""
class is_account_folio(models.Model):
    """Folio (batch) of customer invoices, numbered from a dedicated sequence.

    All fields are readonly from the UI; the folio number is allocated from
    the ``seq_is_account_folio`` sequence when the record is created.
    """
    _name = 'is.account.folio'
    _order = 'name desc'
    name = fields.Char('N° de Folio' , readonly=True)
    date_creation = fields.Date("Date de création" , readonly=True)
    createur_id = fields.Many2one('res.users', 'Créé par', readonly=True)
    invoice_ids = fields.One2many('account.invoice', 'is_folio_id', 'Factures', readonly=True)
    _defaults = {
        # Must be a callable: the previous code called the helper once at
        # class-definition time, which froze the default date at the value it
        # had when the server was started.
        'date_creation': lambda *args: date.today(),
        'createur_id': lambda obj, cr, uid, ctx=None: uid,
    }
    @api.model
    def create(self, vals):
        """Allocate ``name`` from the ``seq_is_account_folio`` sequence, then create."""
        data_obj = self.env['ir.model.data']
        sequence_ids = data_obj.search([('name','=','seq_is_account_folio')])
        if len(sequence_ids)>0:
            sequence_id = sequence_ids[0].res_id
            vals['name'] = self.env['ir.sequence'].get_id(sequence_id, 'id')
        # NOTE(review): if the sequence record is missing, the folio is
        # created without a name -- confirm this fallback is intended.
        res = super(is_account_folio, self).create(vals)
        return res
class account_move_line(models.Model):
    """Extend account.move.line with a back-link to the invoice line that produced it."""
    _inherit = "account.move.line"
    is_account_invoice_line_id = fields.Many2one('account.invoice.line', 'Ligne de facture')
class account_invoice(models.Model):
    """Plastigray extensions of account.invoice: folio/Cegid export links,
    invoice typing, mailing mode and computed discount/VAT amounts."""
    _inherit = 'account.invoice'
    _order = 'id desc'
    # Free-text references used on "factures diverses" (no sale order / BL in Odoo)
    is_document = fields.Char('Document' , help="Ce champ est utilisé dans les factures diverses pour saisir le moule ou le n° d'investissement")
    is_num_cde_client = fields.Char('N° Cde Client', help="Ce champ est utilisé dans les factures diverses sans commande client dans Odoo")
    is_num_bl_manuel = fields.Char('N° BL manuel' , help="Ce champ est utilisé dans les factures diverses sans bon de livraison dans Odoo")
    # Computed amounts (see _compute): cash discount (account 665000) vs other taxes
    is_escompte = fields.Float("Escompte", compute='_compute')
    is_tva = fields.Float("TVA" , compute='_compute', help="Taxes sans l'escompte")
    is_folio_id = fields.Many2one('is.account.folio', 'Folio')
    is_export_cegid_id = fields.Many2one('is.export.cegid' , 'Folio Cegid')
    is_bon_a_payer = fields.Boolean("Bon à payer", default=True)
    is_type_facture = fields.Selection([
        ('standard' , u'Standard'),
        ('diverse' , u'Diverse'),
        ('avoir-qt' , u'Avoir quantité'),
        ('avoir-prix', u'Avoir prix'),
    ], u"Type de facture", default='standard', select=True)
    is_origine_id = fields.Many2one('account.invoice', "Facture d'origine")
    # How the invoice is delivered to the customer; drives envoi_par_mail /
    # imprimer_simple_double below
    is_mode_envoi_facture = fields.Selection([
        ('courrier' , 'Envoi par courrier'),
        ('courrier2' , 'Envoi par courrier en double exemplaire'),
        ('mail' , 'Envoi par mail (1 mail par facture)'),
        ('mail2' , 'Envoi par mail (1 mail par facture en double exemplaire)'),
        ('mail_client' , 'Envoi par mail (1 mail par client)'),
        ('mail_client_bl', 'Envoi par mail avec BL (1 mail par client)'),
    ], "Mode d'envoi de la facture")
    is_date_envoi_mail = fields.Datetime("Mail envoyé le", readonly=False)
    is_masse_nette = fields.Float("Masse nette (Kg)")
def _compute(self):
for obj in self:
escompte = tva = 0
for tax in obj.tax_line:
if tax.account_id.code=='665000':
escompte=escompte+tax.amount
else:
tva=tva++tax.amount
obj.is_escompte = escompte
obj.is_tva = tva
@api.multi
def copy(self,vals):
vals['is_folio_id'] = False
vals['is_export_cegid_id'] = False
res=super(account_invoice, self).copy(vals)
return res
@api.multi
def voir_facture_client_action(self):
for obj in self:
view_id=self.env.ref('is_plastigray.is_invoice_form')
res= {
'name': '<NAME>',
'view_mode': 'form',
'view_type': 'form',
'res_model': 'account.invoice',
'res_id': obj.id,
'view_id': view_id.id,
'type': 'ir.actions.act_window',
'context': {'default_type':'out_invoice', 'type':'out_invoice', 'journal_type': 'sale'},
'domain': [('type','=','out_invoice'),('journal_type','=','sale')],
}
return res
@api.multi
def voir_facture_fournisseur_action(self):
for obj in self:
view_id=self.env.ref('is_plastigray.is_invoice_supplier_form')
res= {
'name': '<NAME>',
'view_mode': 'form',
'view_type': 'form',
'res_model': 'account.invoice',
'res_id': obj.id,
'view_id': view_id.id,
'type': 'ir.actions.act_window',
'context': {'default_type':'in_invoice', 'type':'in_invoice'},
'domain': [('type','=','in_invoice')],
}
return res
@api.multi
def invoice_print(self):
assert len(self) == 1, 'This option should only be used for a single id at a time.'
self.sent = True
res = self.env['report'].get_action(self, 'is_plastigray.is_report_invoice')
return res
@api.multi
def _merge_pdf(self, documents):
"""Merge PDF files into one.
:param documents: list of path of pdf files
:returns: path of the merged pdf
"""
writer = PdfFileWriter()
streams = [] # We have to close the streams *after* PdfFilWriter's call to write()
for document in documents:
pdfreport = file(document, 'rb')
streams.append(pdfreport)
reader = PdfFileReader(pdfreport)
for page in range(0, reader.getNumPages()):
writer.addPage(reader.getPage(page))
merged_file_fd, merged_file_path = tempfile.mkstemp(suffix='.pdf', prefix='report.merged.tmp.')
with closing(os.fdopen(merged_file_fd, 'w')) as merged_file:
writer.write(merged_file)
for stream in streams:
stream.close()
return merged_file_path
    @api.multi
    def imprimer_simple_double(self):
        """Render the selected invoices to PDF (one or two copies each,
        depending on is_mode_envoi_facture), merge them into a single PDF
        attachment and return a URL action that downloads it.

        NOTE(review): builds per-user scratch files under /tmp via a shell
        ``rm -Rf`` -- confirm this is acceptable on the target host.
        """
        cr , uid, context = self.env.args
        db = self._cr.dbname
        # Per-database / per-user scratch directory, wiped on every run
        path="/tmp/factures-" + db + '-'+str(uid)
        cde="rm -Rf " + path
        os.popen(cde).readlines()
        if not os.path.exists(path):
            os.makedirs(path)
        nb=len(self)
        ct=1
        paths=[]
        for obj in self:
            msg = str(ct)+'/'+str(nb)+' - Imprimer en simple ou double exemplaire : '+str(obj.number)
            _logger.info(msg)
            ct+=1
            result = self.env['report'].get_pdf(obj, 'is_plastigray.is_report_invoice')
            # One copy by default, two when the mode is 'courrier2'
            r = range(1, 2)
            if obj.is_mode_envoi_facture=='courrier2':
                r = range(1, 3)
            for x in r:
                file_name = path + '/'+str(obj.number) + '-' + str(x) + '.pdf'
                fd = os.open(file_name,os.O_RDWR|os.O_CREAT)
                try:
                    os.write(fd, result)
                finally:
                    os.close(fd)
                paths.append(file_name)
        # ** Merge all generated PDFs into one file **************************
        path_merged=self._merge_pdf(paths)
        pdfs = open(path_merged,'rb').read().encode('base64')
        # **********************************************************************
        # ** Look for an existing attachment to reuse *************************
        attachment_obj = self.env['ir.attachment']
        name = 'factures-' + db + '-' + str(uid) + '.pdf'
        attachments = attachment_obj.search([('name','=',name)],limit=1)
        # **********************************************************************
        # ** Create or update the attachment ***********************************
        vals = {
            'name': name,
            'datas_fname': name,
            'type': 'binary',
            'datas': pdfs,
        }
        if attachments:
            for attachment in attachments:
                attachment.write(vals)
                attachment_id=attachment.id
        else:
            attachment = attachment_obj.create(vals)
            attachment_id=attachment.id
        #***********************************************************************
        #** Send the merged PDF to the browser as a download *******************
        if attachment_id:
            return {
                'type' : 'ir.actions.act_url',
                'url': '/web/binary/saveas?model=ir.attachment&field=datas&id='+str(attachment_id)+'&filename_field=name',
                'target': 'new',
            }
        #***********************************************************************
    @api.multi
    def envoi_par_mail(self):
        """Send the selected invoices by e-mail directly (no wizard).

        Only invoices not yet mailed (is_date_envoi_mail null) with a
        'mail*' sending mode are considered.  'mail_client' and
        'mail_client_bl' modes are grouped into one mail per customer;
        'mail' and 'mail2' get one mail per invoice.  Restricted to members
        of the accounting group.
        """
        cr , uid, context = self.env.args
        if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):
            raise Warning(u"Accès non autorisé !")
        ids=[]
        for obj in self:
            ids.append(str(obj.id))
        if len(ids)>0:
            SQL="""
                select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id
                from account_invoice ai
                where
                    ai.id in("""+','.join(ids)+""") and
                    ai.is_date_envoi_mail is null and
                    ai.is_mode_envoi_facture like 'mail%'
                order by ai.is_mode_envoi_facture, ai.partner_id, ai.name
            """
            cr.execute(SQL)
            result = cr.fetchall()
            # ** One mail per customer ('mail_client') *************************
            partners={}
            for row in result:
                if row[0]=='mail_client':
                    partner_id = row[1]
                    id = row[3]
                    if not partner_id in partners:
                        partners[partner_id]=[]
                    partners[partner_id].append(id)
            #*******************************************************************
            # ** One mail + delivery notes per customer ('mail_client_bl') *****
            for row in result:
                if row[0]=='mail_client_bl':
                    partner_id = row[1]
                    id = row[3]
                    if not partner_id in partners:
                        partners[partner_id]=[]
                    partners[partner_id].append(id)
            #*******************************************************************
            #** Send the grouped mails, one per partner ************************
            for partner_id in partners:
                ids=partners[partner_id]
                self._envoi_par_mail(partner_id, ids)
            #*******************************************************************
            # ** One mail per invoice ('mail') *********************************
            for row in result:
                if row[0]=='mail':
                    partner_id = row[1]
                    id = row[3]
                    self._envoi_par_mail(partner_id, [id])
            #*******************************************************************
            # ** One mail per invoice, duplicate copy ('mail2') ****************
            for row in result:
                if row[0]=='mail2':
                    partner_id = row[1]
                    id = row[3]
                    self._envoi_par_mail(partner_id, [id])
            #*******************************************************************
@api.multi
def _envoi_par_mail(self, partner_id, ids):
cr , uid, context = self.env.args
user = self.env['res.users'].browse(self._uid)
if user.email==False:
raise Warning(u"Votre mail n'est pas renseigné !")
attachment_ids=[]
for id in ids:
invoice = self.env['account.invoice'].browse(id)
attachments = self.env['ir.attachment'].search([
('res_model','=','account.invoice'),
('res_id' ,'=',id),
], order='id desc', limit=1)
if len(attachments)==0:
raise Warning(u"Facture "+invoice.number+" non générée (non imprimée) !")
for attachment in attachments:
if invoice.is_mode_envoi_facture=='mail2':
# ** Duplication de la facture + fusion ********************
db = self._cr.dbname
path="/tmp/factures-" + db + '-'+str(uid)
cde="rm -Rf " + path
os.popen(cde).readlines()
if not os.path.exists(path):
os.makedirs(path)
paths=[]
for x in range(1, 3):
file_name = path + '/'+str(invoice.number) + '-' + str(x) + '.pdf'
fd = os.open(file_name,os.O_RDWR|os.O_CREAT)
try:
os.write(fd, attachment.datas.decode('base64'))
finally:
os.close(fd)
paths.append(file_name)
# ** Merge des PDF *****************************************
path_merged=self._merge_pdf(paths)
pdfs = open(path_merged,'rb').read().encode('base64')
# **********************************************************
# ** Création d'une piece jointe fusionnée *****************
name = 'facture-' + str(invoice.number) + '-' + str(uid) + '.pdf'
vals = {
'name': name,
'datas_fname': name,
'type': 'binary',
'datas': pdfs,
}
new = self.env['ir.attachment'].create(vals)
attachment_id=new.id
#***********************************************************
else:
attachment_id=attachment.id
attachment_ids.append(attachment_id)
partner = self.env['res.partner'].browse(partner_id)
if partner.is_mode_envoi_facture=='mail_client_bl':
attachment_obj = self.env['ir.attachment']
for id in ids:
invoice = self.env['account.invoice'].browse(id)
for line in invoice.invoice_line:
picking=line.is_move_id.picking_id
# ** Recherche si une pièce jointe est déja associèe au bl *
model='stock.picking'
name='BL-'+picking.name+u'.pdf'
attachments = attachment_obj.search([('res_model','=',model),('res_id','=',picking.id),('name','=',name)])
# **********************************************************
# ** Creation ou modification de la pièce jointe *******************
pdf = self.env['report'].get_pdf(picking, 'stock.report_picking')
vals = {
'name': name,
'datas_fname': name,
'type': 'binary',
'res_model': model,
'res_id': picking.id,
'datas': pdf.encode('base64'),
}
if attachments:
for attachment in attachments:
attachment.write(vals)
attachment_id=attachment.id
else:
attachment = attachment_obj.create(vals)
attachment_id=attachment.id
# ******************************************************************
if attachment_id not in attachment_ids:
attachment_ids.append(attachment_id)
#** Recherche du contact Facturation *******************************
SQL="""
select rp.name, rp.email, rp.active
from res_partner rp inner join is_type_contact itc on rp.is_type_contact=itc.id
where
rp.parent_id="""+str(partner_id)+""" and
itc.name='Facturation' and
rp.active='t'
"""
cr.execute(SQL)
result = cr.fetchall()
emails_to=[]
for row in result:
email_to = str(row[1])
if email_to=='None':
raise Warning(u"Mail du contact de facturation non renseigné pour le client "+partner.is_code+u'/'+partner.is_adr_code+" !")
emails_to.append(row[0]+u' <'+email_to+u'>')
if len(emails_to)==0:
raise Warning(u"Aucun contact de type 'Facturation' trouvé pour le client "+partner.is_code+u'/'+partner.is_adr_code+" !")
#*******************************************************************
email_cc = user.name+u' <'+user.email+u'>'
email_to = u','.join(emails_to)
#email_to = email_cc
email_from = email_cc
subject = u'Facture Plastigray pour '+partner.name
#subject = u'Facture Plastigray pour '+partner.name+u' | |
# repo: bellrichm/weather
""" The uploader """
# pylint: disable=invalid-name
# pylint: enable=invalid-name
import json
import sys
import time
import jwt
import weewx.restx
#from weeutil.weeutil import to_int
#import six
from six.moves import urllib
try:
# Python 2
from Queue import Queue
except ImportError:
# Python 3
from queue import Queue
try:
    # Test for new-style weewx logging by trying to import weeutil.logger
    import weeutil.logger  # pylint: disable=unused-import
    import logging
    LOG = logging.getLogger(__name__)
    def logdbg(msg):
        """ Log debug """
        # NOTE(review): every log helper also prints to stdout -- looks like
        # leftover debug output; confirm it is intentional.
        print(msg)
        LOG.debug(msg)
    def loginf(msg):
        """ Log info """
        print(msg)
        LOG.info(msg)
    def logerr(msg):
        """ Log error """
        print(msg)
        LOG.error(msg)
except ImportError:
    # Old-style weewx logging: fall back to syslog with a 'weert:' prefix
    import syslog
    def logmsg(level, msg):
        """ Log the message """
        print(msg)
        syslog.syslog(level, 'weert: %s:' % msg)
    def logdbg(msg):
        """ Log debug """
        logmsg(syslog.LOG_DEBUG, msg)
    def loginf(msg):
        """ Log info """
        logmsg(syslog.LOG_INFO, msg)
    def logerr(msg):
        """ Log error """
        logmsg(syslog.LOG_ERR, msg)
class RMBArchiveUpload(weewx.restx.StdRESTful):
    """ The uploader class.

    weewx RESTful service: records each new archive record in a bookkeeping
    database, queues it, and lets RMBArchiveUploadThread upload it.
    """
    def __init__(self, engine, config_dict):
        super(RMBArchiveUpload, self).__init__(engine, config_dict)
        loginf("init RMBArchiveUpload")
        # Bookkeeping DB tracking which records have been queued/uploaded
        archive_upload_manager_dict = weewx.manager.get_manager_dict(
            config_dict['DataBindings'],
            config_dict['Databases'],
            'RMBArchiveUpload_binding')
        self.archive_upload_DBM = weewx.manager.open_manager(archive_upload_manager_dict,  # pylint: disable=invalid-name
                                                             initialize=True)
        # Bail out (service disabled) if required options are missing
        site_dict = weewx.restx.check_enable(config_dict, 'RmbUpload', 'host', 'user', 'password')
        if site_dict is None:
            return
        manager_dict = weewx.manager.get_manager_dict_from_config(
            config_dict, 'wx_binding')
        self.archive_queue = Queue()
        self.archive_thread = RMBArchiveUploadThread(self.archive_queue,
                                                     manager_dict,
                                                     archive_upload_manager_dict,
                                                     **site_dict)
        self.archive_thread.start()
        self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
    def new_archive_record(self, event):
        " New archive record callback "
        # Adding to DB here, incase the queuing fails
        self.archive_upload_DBM.getSql('INSERT INTO %s ("dateTime", "run_dateTime") VALUES (?, ?)' %
                                       self.archive_upload_DBM.table_name,
                                       (str(event.record["dateTime"]),
                                        str(int(time.time()))))
        self.archive_queue.put(event.record)
        loginf("new archive record %s" % event)
class RMBArchiveUploadThread(weewx.restx.RESTThread):
    """ Worker thread that POSTs archive records as JSON to
    http://<host>/api/observations, authenticating with a JWT bearer token
    obtained through RMBArchiveUploadLogin and refreshed before expiry.
    Successful uploads are stamped in the bookkeeping database. """
    def __init__(self, queue,
                 manager_dict,
                 archiveUpload_manager_dict,
                 host,
                 user, password,
                 #measurement,
                 #platform, stream,
                 ## loop_filters,
                 protocol_name="RMBArchiveUploadThread",
                 post_interval=None, max_backlog=sys.maxsize, stale=None,
                 log_success=True, log_failure=True,
                 timeout=10, max_tries=3, retry_wait=5, retry_login=3600,
                 softwaretype="weewx-%s" % weewx.__version__,
                 skip_upload=False):
        """ Initializer for RMBArchiveUploadThread """
        super(RMBArchiveUploadThread, self).__init__(queue,
                                                     manager_dict=manager_dict,
                                                     protocol_name=protocol_name,
                                                     post_interval=post_interval,
                                                     max_backlog=max_backlog,
                                                     stale=stale,
                                                     log_success=log_success,
                                                     log_failure=log_failure,
                                                     timeout=timeout,
                                                     max_tries=max_tries,
                                                     retry_wait=retry_wait,
                                                     retry_login=retry_login,
                                                     softwaretype=softwaretype,
                                                     skip_upload=skip_upload)
        self.host = host
        #self.port = to_int(port)
        #self.user = user
        #self.password = password
        # Seconds of headroom before token expiry at which a re-login is forced.
        self.interval = 300
        self.archive_upload_manager_dict = archiveUpload_manager_dict
        # Opened lazily in process_record: __init__ runs on the caller's
        # thread, and the DB manager must be opened on the thread using it.
        self.archive_upload_db_manager = None
        # Synchronous login fetches the initial JWT (encoded + decoded claims).
        self.login = RMBArchiveUploadLogin(Queue(), self.host, user, password)
        self.jwt = self.login.process_record(None, None)
        #self.measurement = measurement
        #self.platform = platform
        #self.stream = stream
        loginf("init RMBArchiveUploadThread")
    def process_record(self, record, dbmanager):
        """ Upload one archive record (refreshing the JWT when it is close
        to expiry), then stamp it as uploaded in the bookkeeping DB. """
        # Constructor is a different thread, so have to do this here.
        if not self.archive_upload_db_manager:
            self.archive_upload_db_manager = weewx.manager.open_manager(
                self.archive_upload_manager_dict)
        # Re-login if the token expires within the next self.interval seconds.
        if self.jwt['decoded']['exp'] < int(time.time()) + self.interval:
            self.jwt = self.login.process_record(None, None)
        super().process_record(record, dbmanager)
        # Only reached when the upload above did not raise: mark success.
        curr_date_time = int(time.time())
        self.archive_upload_db_manager.getSql(
            'UPDATE %s SET upload_dateTime = ? WHERE dateTime= ?' %
            self.archive_upload_db_manager.table_name,
            (str(curr_date_time),
             record["dateTime"]))
        loginf("process_record")
    def format_url(self, _):
        """Override and return the URL used to post to the server"""
        url = "http://%s/api/observations" % self.host
        return url
    def get_request(self, url):
        """ Add the JWT bearer token to the standard request. """
        _request = super().get_request(url)
        _request.add_header("authorization", "bearer " + self.jwt['encoded'])
        return _request
    def get_post_body(self, record):
        """ Return (json payload, content type) for the POST.
        NOTE: coerces a few fields to int, mutating `record` in place. """
        record['dateTime'] = int(record['dateTime'])
        record['usUnits'] = int(record['usUnits'])
        record['interval'] = int(record['interval'])
        return(json.dumps(record), "application/json")
    def handle_exception(self, e, count):
        """ Log upload failures; for HTTP errors also log the response body.
        NOTE(review): e.file is a legacy alias for the HTTPError response
        stream - consider e.read() instead; confirm against urllib docs. """
        logdbg("%s: Failed upload attempt %d: %s" % (self.protocol_name, count, e))
        #super().handle_exception(e, count)
        print(type(e))
        if isinstance(e, urllib.error.HTTPError):
            response = e.file
            response_body = response.read()
            logerr("%s: Failed upload attempt %d: %s" % (self.protocol_name, count, response_body))
        print("exception")
class RMBArchiveUploadLogin(weewx.restx.RESTThread):
    """ Helper (used synchronously despite the RESTThread base) that logs in
    to the remote server. process_record() POSTs the configured credentials
    to http://<host>/api/user/login and stores the returned token in
    self.jwt as {'encoded': <token str>, 'decoded': <claims dict>}. """
    def __init__(self, queue,
                 host,
                 user, password,
                 #measurement,
                 #platform, stream,
                 ## loop_filters,
                 protocol_name="RMBArchiveUploadLogin",
                 post_interval=None, max_backlog=sys.maxsize, stale=None,
                 log_success=True, log_failure=True,
                 timeout=10, max_tries=3, retry_wait=5, retry_login=3600,
                 softwaretype="weewx-%s" % weewx.__version__,
                 skip_upload=False):
        """ Initializer for RMBArchiveUploadLogin """
        super(RMBArchiveUploadLogin, self).__init__(queue,
                                                    protocol_name=protocol_name,
                                                    post_interval=post_interval,
                                                    max_backlog=max_backlog,
                                                    stale=stale,
                                                    log_success=log_success,
                                                    log_failure=log_failure,
                                                    timeout=timeout,
                                                    max_tries=max_tries,
                                                    retry_wait=retry_wait,
                                                    retry_login=retry_login,
                                                    softwaretype=softwaretype,
                                                    skip_upload=skip_upload)
        self.host = host
        #self.port = to_int(port)
        self.user = user
        self.password = password
        # 'exp' of 0 guarantees the caller's first expiry check forces a login.
        self.jwt = {}
        self.jwt['encoded'] = None
        self.jwt['decoded'] = {}
        self.jwt['decoded']['exp'] = 0
        print("init login")
    def process_record(self, record, dbmanager):
        """ Perform the login POST (with retries) and return self.jwt. """
        loginf("process_record")
        # ... format the URL, using the relevant protocol ...
        _url = self.format_url(record)
        # ... get the Request to go with it...
        _request = self.get_request(_url)
        # ... get any POST payload...
        _payload = self.get_post_body(record)
        # ... add a proper Content-Type if needed...
        if _payload:
            _request.add_header('Content-Type', _payload[1])
            data = _payload[0]
        else:
            data = None
        # ... then, finally, post it
        self.post_with_retries(_request, data)
        return self.jwt
    def format_url(self, _):
        """Override and return the URL used to post to the WeeRT server"""
        #url = "%s %s %s" % (self.host, self.port, self.measurement)
        url = "http://%s/api/user/login" % self.host
        return url
    def get_post_body(self, record):
        """ Return (json login payload, content type) for the POST. """
        data = {}
        data['UserName'] = self.user
        # BUG FIX: this previously read '<PASSWORD>' (a redaction placeholder,
        # not even valid Python); send the configured password instead.
        data['Password'] = self.password
        return(json.dumps(data), "application/json")
    def check_response(self, response):
        """ Called on a successful response: capture and decode the token. """
        # Get the token
        response_body = response.read()
        data = json.loads(response_body)
        self.jwt['encoded'] = data['jsonWebToken']
        # SECURITY NOTE: verify=False skips signature verification; we only
        # read 'exp' from our own server's token here, but do not trust
        # other claims from an unverified token.
        self.jwt['decoded'] = jwt.decode(data['jsonWebToken'], verify=False)
        print("check response %s" % self.jwt)
    def handle_exception(self, e, count):
        """ Log login failures; for HTTP errors also log the response body. """
        logdbg("%s: Failed upload attempt %d: %s" % (self.protocol_name, count, e))
        #super().handle_exception(e, count)
        print(type(e))
        if isinstance(e, urllib.error.HTTPError):
            response = e.file
            response_body = response.read()
            logerr("%s: Failed upload attempt %d: %s" % (self.protocol_name, count, response_body))
if __name__ == '__main__':
import argparse
import copy
import os
import configobj
from weewx.engine import StdEngine
    def main():
        """ Command-line test harness: load the weewx config file named on
        the command line, build a minimal simulator-based engine, and print
        the not-yet-uploaded archive records returned by catch_up2(). """
        print("in main")
        parser = argparse.ArgumentParser()
        parser.add_argument("config_file")
        options = parser.parse_args()
        config_path = os.path.abspath(options.config_file)
        config_dict = configobj.ConfigObj(config_path, file_error=True)
        # Smallest config a StdEngine will accept (simulator driver, no services).
        min_config_dict = {
            'Station': {
                'altitude': [0, 'foot'],
                'latitude': 0,
                'station_type': 'Simulator',
                'longitude': 0
            },
            'Simulator': {
                'driver': 'weewx.drivers.simulator',
            },
            'Engine': {
                'Services': {}
            }
        }
        engine = StdEngine(min_config_dict)
        #service = RMBArchiveUpload(engine, config_dict)
        db_binder = weewx.manager.DBBinder(config_dict)
        data_binding = config_dict['StdArchive'].get('data_binding', 'wx_binding')
        dbmanager = db_binder.get_manager(data_binding)
        # NOTE(review): engine, dbmanager, and this sample record are set up
        # but unused below; kept for the commented-out process_record call.
        record = {}
        record['dateTime'] = time.time()
        record['usUnits'] = 1
        record['interval'] = 5
        records = catch_up2(config_dict)
        for record in records:
            print(record)
            #service.archive_thread.process_record(record, dbmanager)
        print("done")
    def catch_up2(config_dict):
        """ Return a generator over full archive records whose bookkeeping row
        still has upload_dateTime NULL (i.e. never successfully uploaded),
        or None when the [[RmbUpload]] stanza is missing/incomplete.
        NOTE(review): assumes the databases use the '?' DBAPI paramstyle
        (SQLite) - confirm before running against MySQL. """
        dictionary = copy.deepcopy(config_dict)
        dictionary['StdRESTful']['RmbUpload']['max_tries'] = '3'
        dictionary['StdRESTful']['RmbUpload']['retry_wait'] = '5'
        site_dict = weewx.restx.check_enable(dictionary, 'RmbUpload', 'user', 'password')
        if site_dict is None:
            return
        archive_upload_manager_dict = weewx.manager.get_manager_dict(
            config_dict['DataBindings'], config_dict['Databases'], 'RMBArchiveUpload_binding')
        site_dict['archiveUpload_manager_dict'] = archive_upload_manager_dict
        archive_upload_manager = weewx.manager.open_manager(archive_upload_manager_dict)
        # dateTimes of records that were queued but never stamped as uploaded.
        select_sql = "SELECT dateTime from archive where archive.upload_dateTime is NULL"
        error_dates = archive_upload_manager.genSql(select_sql)
        lister = []
        for error_date in error_dates:
            lister.append(error_date[0])
        placeholder = '?' # For SQLite. See DBAPI paramstyle.
        placeholders = ', '.join(placeholder for unused in lister)
        query = 'SELECT * FROM archive WHERE dateTime IN (%s)' % placeholders
        #cursor.execute(query, error_dates)
        db_binder = weewx.manager.DBBinder(config_dict)
        data_binding = config_dict['StdArchive'].get('data_binding', 'wx_binding')
        dbmanager = db_binder.get_manager(data_binding)
        records = dbmanager.genSql(query, lister)
        print("end")
        return records
def catch_up(config_dict):
""" process any ones that have errored
still need to think about ones that were neverr logged
probably need to retrieve observations from server and compare to db
similar to wunderfixer """
# Unfortunately, this is dependent on the underlying databases being SQLite
dictionary = copy.deepcopy(config_dict)
dictionary['StdRESTful']['RmbUpload']['max_tries'] = '3'
dictionary['StdRESTful']['RmbUpload']['retry_wait'] = '5'
attach_sql = "ATTACH DATABASE \
'/home/richbell/development/weewx-code/weewx/archive-replica/weewx.sdb' \
as weewx;"
# gets all archive data that does not have a record stating int was processed
# where_clause = "WHERE weewx.archive.dateTime IN \
# (SELECT dateTime FROM archive where archive.upload_dateTime is NULL) "
# gets all archive data that has not been marked as processed
# where_clause = "WHERE weewx.archive.dateTime NOT IN (SELECT dateTime FROM archive) "
select_sql = "SELECT \
`dateTime`, `usUnits`, `interval`, `barometer`, `pressure`, `altimeter`, `inTemp`, `outTemp`, \
`inHumidity`, `outHumidity`, `windSpeed`, `windDir`, `windGust`, `windGustDir`, \
`rainRate`, `rain`, `dewpoint`, `windchill`, `heatindex`, `ET`, `radiation`, `UV`, \
`extraTemp1`, `extraTemp2`, `extraTemp3`, `soilTemp1`, `soilTemp2`, `soilTemp3`, `soilTemp4`, \
`leafTemp1`, `leafTemp2`, `extraHumid1`, `extraHumid2`, `soilMoist1`, `soilMoist2`, `soilMoist3`, `soilMoist4`, \
`leafWet1`, `leafWet2`, `rxCheckPercent`, `txBatteryStatus`, `consBatteryVoltage`, `hail`, `hailRate`, \
`heatingTemp`, `heatingVoltage`, `supplyVoltage`, `referenceVoltage`, \
`windBatteryStatus`, `rainBatteryStatus`, `outTempBatteryStatus`, `inTempBatteryStatus` \
FROM weewx.archive \
WHERE weewx.archive.dateTime IN (SELECT dateTime FROM archive where archive.upload_dateTime is NULL) \
OR weewx.archive.dateTime IN (SELECT dateTime FROM archive where archive.upload_dateTime is NULL) \
ORDER BY dateTime ASC ;"
site_dict = weewx.restx.check_enable(dictionary, 'RmbUpload', 'user', 'password')
if site_dict is None:
return
archive_upload_manager_dict = weewx.manager.get_manager_dict(
config_dict['DataBindings'], config_dict['Databases'], 'RMBArchiveUpload_binding')
site_dict['archiveUpload_manager_dict'] = archive_upload_manager_dict
archive_upload_manager = weewx.manager.open_manager(archive_upload_manager_dict)
archive_upload_manager.getSql(attach_sql)
data_records = archive_upload_manager.genSql(select_sql)
i = 0
archive_records = []
for data_record in data_records:
archive_records.append({})
archive_records[i]['dateTime'] = data_record[0]
archive_records[i]['usUnits'] = data_record[1]
archive_records[i]['interval'] = data_record[2]
archive_records[i]['barometer'] = data_record[3]
archive_records[i]['pressure'] = data_record[4]
archive_records[i]['altimeter'] = data_record[5]
archive_records[i]['inTemp'] = data_record[6]
archive_records[i]['outTemp'] = data_record[7]
archive_records[i]['inHumidity'] = data_record[8]
archive_records[i]['outHumidity'] = data_record[9]
archive_records[i]['windSpeed'] = data_record[10]
archive_records[i]['windDir'] = data_record[11]
archive_records[i]['windGust'] = data_record[12]
archive_records[i]['windGustDir'] = data_record[13]
archive_records[i]['rainRate'] = data_record[14]
archive_records[i]['rain'] = data_record[15]
archive_records[i]['dewpoint'] = data_record[16]
archive_records[i]['windchill'] = data_record[17]
archive_records[i]['heatindex'] = data_record[18]
archive_records[i]['ET'] = data_record[19]
archive_records[i]['radiation'] = data_record[20]
archive_records[i]['UV'] = data_record[21]
archive_records[i]['extraTemp1'] = data_record[22]
archive_records[i]['extraTemp2'] = data_record[23]
archive_records[i]['extraTemp3'] = data_record[24]
archive_records[i]['soilTemp1'] = data_record[25]
archive_records[i]['soilTemp2'] = data_record[26]
archive_records[i]['soilTemp3'] = data_record[27]
archive_records[i]['soilTemp4'] = data_record[28]
archive_records[i]['leafTemp1'] = data_record[29]
archive_records[i]['leafTemp2'] = data_record[30]
archive_records[i]['extraHumid1'] = data_record[31]
archive_records[i]['extraHumid2'] = data_record[32]
archive_records[i]['soilMoist1'] = data_record[33]
archive_records[i]['soilMoist2'] = data_record[34]
archive_records[i]['soilMoist3'] = data_record[35]
archive_records[i]['soilMoist4'] = data_record[36]
archive_records[i]['leafWet1'] = data_record[37]
archive_records[i]['leafWet2'] = data_record[38]
archive_records[i]['rxCheckPercent'] = data_record[39]
| |
import re
import numpy as np
import warnings
import copy
from .utils import is_pos_int, is_non_neg_int, \
is_proportion, is_positive, is_non_negative, \
inherits
class layout:
    def __init__(self,
                 ncol=None,
                 nrow=None,
                 byrow=None,
                 rel_widths=None,
                 rel_heights=None,
                 design=None
                 ):
        """
        layout class to store information about arangement of patches found
        in `cow.patch`.
        Arguments
        ---------
        ncol : integer
            Integer for the number of columns to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            `design` is provided). If ``ncol`` is None but ``nrow`` is not,
            then ``ncol`` will default to the minimum number of columns to
            make sure that all patches can be visualized.
        nrow : integer
            Integer for the number of rows to arrange the the patches in.
            The default is None (which avoids conflicts if a value for
            ``design`` is provided). If ``nrow`` is None but ``ncol`` is not,
            then ``nrow`` will default to the minimum number of rows to make
            sure that all patches can be visualized.
        byrow : boolean
            If ``ncol`` and/or ``nrow`` is included, then this boolean
            indicates if the patches should be ordered by row (default if
            ``byrow`` is None or when parameter is ``True``) or by column (if
            ``byrow`` was ``False``).
        design : np.array (float based) or str
            Specification of the location of each patch in the arrangement.
            Can either be a float numpy array with integers between 0 and
            the number of patches to arrange, or a text string that captures
            similar ideas to the array approach but uses capital alphabetical
            characters (A-Z) to indicate each figure. More information is in
            Notes.
        rel_widths : list, np vector or tuple
            Numerical vector of relative columns widths. This not required,
            the default would be ``np.ones(ncol)`` or
            ``np.ones(design.shape[0])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first column
            twice as wide as the second column.
        rel_heights : list or tuple
            Numerical vector of relative row heights. This not required,
            the default would be ``np.ones(nrow)`` or
            ``np.ones(design.shape[1])``. Note that this is a relative sizing
            and the values are only required to be non-negative, non-zero
            values, for example ``[1,2]`` would would make the first row twice
            as tall as the second row.
        Notes
        -----
        *Design*
        The ``design`` parameter expects specific input.
        1. If the ``design`` is input as a numpy array, we expect it to have
        integers only (0 to # patches-1). It is allowed to have ``np.nan``
        values if certain "squares" of the layout are not covered by others
        (the covering is defined by the value ordering). Note that we won't
        check for overlap and ``np.nan`` is not enforced if another patches'
        relative (min-x,min-y) and (max-x, max-y) define a box over that
        ``np.nan``'s area.
        An example of a design of the numpy array form could look like
        >>> my_np_design = np.array([[1,1,2],
        ...                          [3,3,2],
        ...                          [3,3,np.nan]])
        2. if the ``design`` parameter takes in a string, we expect it to have
        a structure such that each line (pre ``\\\\n``) contains the same number
        of characters, and these characters must come from the first
        (number of patches) capital alphabetical characters or the ``\#`` or
        ``.`` sign to indicate an empty square. Similar arguments w.r.t.
        overlap and the lack of real enforcement for empty squares applies
        (as in 1.).
        An example of a design of the string form could look like
        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC\#
        ... \"\"\"
        or
        >>> my_str_design = \"\"\"
        ... AAB
        ... CCB
        ... CC.
        ... \"\"\"
        See the `Layout guide`_ for more detailed examples of functionality.
        .. _Layout guide: https://benjaminleroy.github.io/cowpatch/guides/Layout.html
        *Similarities to our `R` cousins:*
        This layout function is similar to `patchwork\:\:plot_layout <https://patchwork.data-imaginist.com/reference/plot_layout.html>`_
        (with a special node to ``design`` parameter) and helps perform similar
        ideas to `gridExtra\:\:arrangeGrob <https://cran.r-project.org/web/packages/gridExtra/vignettes/arrangeGrob.html>`_'s
        ``layout_matrix`` parameter, and `cowplot\:\:plot_grid <https://wilkelab.org/cowplot/reference/plot_grid.html>`_'s
        ``rel_widths`` and ``rel_heights`` parameters.
        Examples
        --------
        >>> # Necessary libraries for example
        >>> import numpy as np
        >>> import cowpatch as cow
        >>> import plotnine as p9
        >>> import plotnine.data as p9_data
        >>> g0 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_bar(p9.aes(x="hwy")) +\\
        ...     p9.labs(title = 'Plot 0')
        >>> g1 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ")) +\\
        ...     p9.labs(title = 'Plot 1')
        >>> g2 = p9.ggplot(p9_data.mpg) +\\
        ...     p9.geom_point(p9.aes(x="hwy", y = "displ", color="class")) +\\
        ...     p9.labs(title = 'Plot 2')
        >>> g3 = p9.ggplot(p9_data.mpg[p9_data.mpg["class"].isin(["compact",
        ...                                                      "suv",
        ...                                                      "pickup"])]) +\\
        ...     p9.geom_histogram(p9.aes(x="hwy"),bins=10) +\\
        ...     p9.facet_wrap("class")
        >>> # design matrix
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                         [2,2]]))
        >>> vis_obj.show()
        >>> # design string
        >>> vis_obj2 = cow.patch(g1,g2,g3)
        >>> vis_obj2 += cow.layout(design = \"\"\"
        ... AB
        ... CC
        ... \"\"\")
        >>> vis_obj2.show()
        >>> # nrow, ncol, byrow
        >>> vis_obj3 = cow.patch(g0,g1,g2,g3)
        >>> vis_obj3 += cow.layout(nrow=2, byrow=False)
        >>> vis_obj3.show()
        >>> # rel_widths/heights
        >>> vis_obj = cow.patch(g1,g2,g3)
        >>> vis_obj += cow.layout(design = np.array([[0,1],
        ...                                         [2,2]]),
        ...                       rel_widths = np.array([1,2]))
        >>> vis_obj.show()
        See also
        --------
        area : object class that helps ``layout`` define where plots will go
            in the arangement
        patch : fundamental object class which is combined with ``layout`` to
            defin the overall arangement of plots
        """
        # --- design takes precedence over explicit ncol/nrow -------------
        if design is not None:
            if ncol is not None or nrow is not None:
                warnings.warn("ncol and nrow are overridden"+\
                    " by the design parameter")
            if isinstance(design, np.ndarray):
                # a 1d array is promoted to a single-row design
                if len(design.shape) == 1:
                    warnings.warn("design matrix is 1d,"+\
                        " will be seen as a 1-row design")
                    nrow, ncol = 1, design.shape[0]
                    design = design.reshape((1,-1))
                else:
                    nrow, ncol = design.shape
            if isinstance(design, str):
                # convert design to desirable structure matrix structure
                design = self._design_string_to_mat(design)
                nrow, ncol = design.shape
        # --- infer missing ncol/nrow from rel_widths/rel_heights lengths --
        if ncol is None:
            if rel_widths is not None:
                if isinstance(rel_widths, np.ndarray):
                    ncol = rel_widths.shape[0]
                if isinstance(rel_widths, list) or \
                    isinstance(rel_widths, tuple):
                    ncol = len(rel_widths)
                    rel_widths = np.array(rel_widths)
        if nrow is None:
            if rel_heights is not None:
                if isinstance(rel_heights, np.ndarray):
                    nrow = rel_heights.shape[0]
                if isinstance(rel_heights, list) or \
                    isinstance(rel_heights, tuple):
                    nrow = len(rel_heights)
                    rel_heights= np.array(rel_heights)
        # at least one sizing hint must have been provided by now
        if rel_widths is None and rel_heights is None:
            assert not (ncol is None and nrow is None), \
                "need some parameters to not be none in design initialization"
        # default to equal-sized columns/rows when only counts are known
        if rel_widths is None and ncol is not None:
            rel_widths = np.ones(ncol)
        if rel_heights is None and nrow is not None:
            rel_heights = np.ones(nrow)
        # normalize any list/tuple input to numpy arrays
        if rel_heights is not None:
            rel_heights = np.array(rel_heights)
        if rel_widths is not None:
            rel_widths = np.array(rel_widths)
        # if design is None:
        #     if byrow is None or byrow:
        #         order_str = "C"
        #     else:
        #         order_str = "F"
        #     design = np.arange(ncol*nrow,dtype = int).reshape((nrow, ncol),
        #                                                order = order_str)
        # byrow is meaningless once an explicit design is given
        if design is not None:
            byrow = None
        # ncol/nrow and rel_widths/rel_heights correct alignment
        if ncol is not None and rel_widths is not None:
            if ncol != rel_widths.shape[0]:
                raise ValueError("ncol (potentially from the design) and "+\
                                 "rel_widths disagree on size of layout")
        if nrow is not None and rel_heights is not None:
            if nrow != rel_heights.shape[0]:
                raise ValueError("nrow (potentially from the design) and "+\
                                 "rel_heights disagree on size of layout")
        self.ncol = ncol
        self.nrow = nrow
        self.__design = design
        self.byrow = byrow
        self.rel_widths = rel_widths
        self.rel_heights = rel_heights
        # number of distinct patches referenced by the design (validated)
        self.num_grobs = self._assess_mat(design)
def _design_string_to_mat(self, design):
"""
Internal function to convert design string into a matrix
Arguments
---------
design : str
design in a string format
Returns
-------
design : np.array integer
design in np.array format
"""
design_clean = re.sub(" *\t*", "", design) # removing spaces and tabs
design_clean = re.sub("^\n*", "", design_clean) # remove leading nl
design_clean = re.sub("\n*$", "", design_clean) # remove following nl
row_info = re.split("\n", design_clean)
ncol_lengths = np.unique([len(x) for x in row_info])
if ncol_lengths.shape != (1,):
raise ValueError("expect all rows in design to have the same "+\
"number of entries, use # for an empty space "+\
"if using a string format.")
ncol = int(ncol_lengths)
nrow = len(re.findall("\n", design)) + 1
design = np.array([[ ord(val)-65
if not np.any([val == x for x in ["#","."]])
else np.nan
for | |
# repo: PFalkowski/UnpackValidateEegData
import unittest
import pandas as pd
import EegSignalProcessing as eeg
import numpy as np
import matplotlib.pyplot as plt
import numpy.fft as fft
import mne
import os
import json
from pandas.testing import assert_frame_equal
class UnitTestHelper:
    """Shared helpers for the test cases in this module."""
    @staticmethod
    def assert_dict_almost_equal(actual, expected, decimal = 7):
        """Recursively compare numeric dict values to `decimal` places.

        Nested dicts are descended into; leaf values are compared with
        numpy's assert_almost_equal.
        """
        for key, expected_value in expected.items():
            actual_value = actual[key]
            if isinstance(actual_value, dict):
                UnitTestHelper.assert_dict_almost_equal(actual_value, expected_value, decimal)
            else:
                np.testing.assert_almost_equal(actual_value, expected_value, decimal)
class Test_File(unittest.TestCase):
def test_ctor_ThrowsWhenNoFile(self):
with self.assertRaises(ValueError):
eeg.File("fileThatDoesNotExist.txt")
def test_ctor_SetsVariables(self):
tested = eeg.File("Test/fileThatExists.txt")
self.assertTrue(tested.fullFilePath.endswith("Test/fileThatExists.txt"))
self.assertEqual("fileThatExists", tested.nameWithoutExtension)
self.assertFalse(tested.pathWithoutFileName.endswith("Test/fileThatExists.txt"))
def test_ComputeSha256(self):
file = eeg.File("Test/TestSub01_TestSession_testCondition.vhdr")
actual = file.ComputeFileSha256()
expected = "aed7686f60db75fec3016e136f7bdb73a0c8dc6ca57bb55051502647528b0974"
self.assertEqual(expected, actual)
def test_Validate_valid(self):
file = eeg.File("Test/TestSub01_TestSession_testCondition.vhdr")
shaDigest = "aed7686f60db75fec3016e136f7bdb73a0c8dc6ca57bb55051502647528b0974"
actual = file.Validate(shaDigest)
self.assertTrue(actual)
def test_Validate_invalid(self):
file = eeg.File("Test/TestSub01_TestSession_testCondition.vhdr")
shaDigest = "aed7686f60db75fec3016e136f7bdb73a0c8dc6ca57bb55051502647528b0973"
actual = file.Validate(shaDigest)
self.assertFalse(actual)
def test_Validate_invalid(self):
tested = eeg.File("Test/fileThatExists.txt")
actual = len(tested.GetAllLines())
expected = 3
self.assertEqual(expected, actual)
class Test_ChecksumFile(unittest.TestCase):
def test_ctor_ThrowsWhenNoFile(self):
with self.assertRaises(ValueError):
eeg.ChecksumFile("fileThatDoesNotExist.txt")
def test_GetChecksumDictionary(self):
file = eeg.ChecksumFile("Test\Sub0x - checksums.txt")
actual = file.GetChecksumDictionary()
expected = {'S01': 'F6t', 'S02': 'A4t'}
self.assertEqual(expected, actual)
class Test_EegFile(unittest.TestCase):
def test_ctor_ThrowsWhenNoFile(self):
with self.assertRaises(ValueError):
eeg.EegFile("fileThatDoesNotExist.txt")
def test_ctor_SetsVariables(self):
eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
actual = eegFile.subject
expected = "TestSub01"
self.assertEqual(expected, actual)
actual = eegFile.session
expected = "TestSession"
self.assertEqual(expected, actual)
actual = eegFile.condition
expected = "testCondition"
self.assertEqual(expected, actual)
def test_AsDataFrame_withoutLabels(self):
eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
actual = eegFile.AsDataFrame(False)
self.assertEqual((6553, 128), actual.shape)
def test_AsDataFrame_withLabels(self):
eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
actual = eegFile.AsDataFrame(True)
self.assertEqual((6553, 133), actual.shape)
def test_SaveToCsv_withLabelsNoExtensionRelativePath(self):
eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
outputPath = "Test/test_SaveToCsv_withLabels.csv"
eegFile.SaveToCsv(outputPath)
self.assertTrue(os.path.isfile(outputPath))
os.remove(outputPath)
#def test_Fft(self):
# chName = "ECoG_ch003"
# eegFile = eeg.EegFile("Test/100HzTest.vhdr")
# eegFile.plotSpectrum()
# data = eegFile.GetChannel(chName)
# #data = eegFile.AsDataFrame()
# plt.plot(data)
# plt.show()
# bands = eegFile.GetAverageBandpower()
# eeg.EegFile.PlotBands(bands)
# ## Perform FFT WITH SCIPY
# signalFFT = np.fft.rfft(data)
# ## Get Power Spectral Density
# signalPSD = np.abs(signalFFT) ** 2
# ## Get frequencies corresponding to signal PSD
# fftFreq = np.fft.rfftfreq(len(data), 1.0/eegFile.samplingRate)
# plt.figurefigsize=(8,4)
# plt.plot(fftFreq, 10*np.log10(signalPSD))
# #plt.xlim(0, 100);
# plt.xlabel('Frequency Hz')
# plt.ylabel('Power Spectral Density (dB)')
# plt.show()
# print('duh')
class Test_EegSample(unittest.TestCase):
    def GetMockDataFrame(self, withLabels=True):
        """ Load the BrainVision test fixture into a DataFrame; when
        withLabels is True, append the five metadata label columns that
        EegSample expects. """
        rawData = mne.io.read_raw_brainvision("Test/TestSub01_TestSession_testCondition.vhdr", preload=True, stim_channel=False, verbose = True)
        brain_vision = rawData.get_data().T
        df = pd.DataFrame(data=brain_vision, columns=rawData.ch_names)
        if (withLabels):
            df["Subject"] = "testSubject"
            df["Session"] = "testSession"
            df["Condition"] = "testAwakeCondition"
            df["BinaryCondition"] = "Conscious"
            df["TernaryCondition"] = "Conscious"
        return df
    def GetMockEegSample(self, withLabels=True):
        """ Wrap the mock DataFrame in an EegSample.
        NOTE(review): the sampling rate is hard-coded to 100 - presumably it
        matches the fixture's true rate; confirm. """
        df = self.GetMockDataFrame(withLabels)
        subject = "testSubject"
        session = "testSession"
        condition = "testAwakeCondition"
        return eeg.EegSample(df, 100, subject, session, condition)
    def test_Ctor(self):
        # Constructor should store every field and derive the binary/ternary
        # consciousness labels from the labelled DataFrame.
        df = self.GetMockDataFrame(True)
        subject = "testSubject"
        session = "testSession"
        condition = "testConditionAwake"
        tested = eeg.EegSample(df, 78, subject, session, condition)
        self.assertEqual(78, tested.samplingRate)
        self.assertEqual(df.shape, tested.dataFrame.shape)
        self.assertEqual(subject, tested.subject)
        self.assertEqual(session, tested.session)
        self.assertEqual(condition, tested.condition)
        self.assertEqual("Conscious", tested.binaryCondition)
        self.assertEqual("Conscious", tested.ternaryCondition)
    def test_InitializeFromEegFile(self):
        # Factory method should yield the full labelled (133-column) frame.
        eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
        tested = eeg.EegSample.InitializeFromEegFile(eegFile)
        actual = tested.dataFrame.shape
        expected = (6553, 133)
        self.assertEqual(expected, actual)
    def test_GetAllChannelNames(self):
        # Fixture has 128 channels named ECoG_ch001 ... ECoG_ch128.
        eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
        tested = eeg.EegSample.InitializeFromEegFile(eegFile)
        actual = tested.GetAllChannelNames()
        self.maxDiff = None
        expected = []
        for i in range(1, 129, 1):
            expected.append(f"ECoG_ch{str(i).zfill(3)}")
        self.assertListEqual(expected, actual)
    # Static condition-classification helpers: condition string -> label.
    def test_BinaryCondition_Conscious(self):
        actual = eeg.EegSample.BinaryCondition("AwakeEyesOpened")
        expected = "Conscious"
        self.assertEqual(expected, actual)
    def test_BinaryCondition_Unconscious(self):
        actual = eeg.EegSample.BinaryCondition("Sleeping")
        expected = "Unconscious"
        self.assertEqual(expected, actual)
    def test_TernaryCondition_Conscious(self):
        actual = eeg.EegSample.TernaryCondition("AwakeEyesOpened")
        expected = "Conscious"
        self.assertEqual(expected, actual)
    def test_TernaryCondition_InBetween(self):
        actual = eeg.EegSample.TernaryCondition("RecoveryEyesClosed")
        expected = "InBetween"
        self.assertEqual(expected, actual)
    def test_Ctor_RaisesErrorWhenNotPdDf(self):
        # Passing an EegFile instead of a DataFrame must raise TypeError.
        eegFile = eeg.EegFile("Test/TestSub01_TestSession_testCondition.vhdr")
        subject = "testSubject"
        session = "testSession"
        condition = "testCondition"
        self.assertRaises(TypeError, eeg.EegSample, eegFile, 78, subject, session, condition)
    # DataFrameHasLabels: detect presence of the (default or given) label columns.
    def test_DataFrameHasLabels_True(self):
        df = self.GetMockDataFrame(True)
        actual = eeg.EegSample.DataFrameHasLabels(df)
        self.assertTrue(actual)
    def test_DataFrameHasLabels_False(self):
        df = self.GetMockDataFrame(False)
        actual = eeg.EegSample.DataFrameHasLabels(df)
        self.assertFalse(actual)
    def test_DataFrameHasLabels_CustomLabels_True(self):
        df = self.GetMockDataFrame(True)
        actual = eeg.EegSample.DataFrameHasLabels(df, ["Subject", "Session"])
        self.assertTrue(actual)
    def test_DataFrameHasLabels_CustomLabels_False(self):
        df = self.GetMockDataFrame(False)
        actual = eeg.EegSample.DataFrameHasLabels(df, ["Subject", "Session"])
        self.assertFalse(actual)
    def test_AddLabelsToDf(self):
        # AddLabelsToDf mutates the frame in place, appending all 5 labels.
        expected = pd.DataFrame({'Test1':['AAA', 'BBB', 'CCC', 'DDD'],
                                 'Test2':[11, 13, 17, 23],
                                 'Subject':['testSubject','testSubject','testSubject','testSubject'],
                                 'Session':['testSession','testSession','testSession','testSession'],
                                 'Condition':['testAwakeCondition','testAwakeCondition','testAwakeCondition','testAwakeCondition'],
                                 'BinaryCondition':['Conscious','Conscious','Conscious','Conscious'],
                                 'TernaryCondition':['Conscious','Conscious','Conscious','Conscious']})
        actual = pd.DataFrame({'Test1':['AAA', 'BBB', 'CCC', 'DDD'], 'Test2':[11, 13, 17, 23]})
        eeg.EegSample.AddLabelsToDf(actual, 'testSubject', 'testSession', 'testAwakeCondition')
        assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
    # GetDfWithoutLabels: drop the requested label columns.
    def test_GetDfWithoutLabels(self):
        df = self.GetMockDataFrame(True)
        dfWithDroppedLabels = eeg.EegSample.GetDfWithoutLabels(df, ["Subject", "Session"])
        self.assertEqual((6553, 131), dfWithDroppedLabels.shape)
    def test_GetDfWithoutLabels_WhenNoLabelsPassed(self):
        df = self.GetMockDataFrame(True)
        dfWithDroppedLabels = eeg.EegSample.GetDfWithoutLabels(df, [])
        self.assertEqual((6553, 133), dfWithDroppedLabels.shape)
    def test_GetDfWithoutLabels_WhenNoMatchingLabelsPassed(self):
        df = self.GetMockDataFrame(True)
        self.assertRaises(KeyError, eeg.EegSample.GetDfWithoutLabels, df, ["TheseAreNotTheLabelsYouAreLookingFor"])
    def test_GetChannel(self):
        # A single channel comes back as a 1-d series of all 6553 samples.
        tested = self.GetMockEegSample()
        actual = tested.GetChannel("ECoG_ch001")
        self.assertEqual((6553,), actual.shape)
    # GetRandomSubset(ratio, withLabels): row-sampled frame; the second
    # argument controls whether the 5 label columns are included.
    def test_GetRandomSubset_WithLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetRandomSubset(0.1, True)
        expected = (int(6553 * 0.1), 133)
        self.assertEqual(expected, actual.shape)
    def test_GetRandomSubset_WithLabels_ThrowsWhenNoLabels(self):
        tested = self.GetMockEegSample(False)
        self.assertRaises(ValueError, tested.GetRandomSubset, 0.1, True)
    def test_GetRandomSubset_RatioIsOne(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetRandomSubset(1, True)
        expected = (6553, 133)
        self.assertEqual(expected, actual.shape)
    def test_GetRandomSubset_RatioIsZero(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetRandomSubset(0, True)
        expected = (0, 133)
        self.assertEqual(expected, actual.shape)
    def test_GetRandomSubset_NoLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetRandomSubset(0.5, False)
        expected = (int(6553 * 0.5), 128)
        self.assertEqual(expected, actual.shape)
    def test_GetRandomSubset_NoLabels2(self):
        tested = self.GetMockEegSample(False)
        actual = tested.GetRandomSubset(0.5, False)
        expected = (int(6553 * 0.5), 128)
        self.assertEqual(expected, actual.shape)
    def test_GetDataFrame_GetsLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetDataFrame(True).shape
        expected = (6553, 133)
        self.assertEqual(expected, actual)
    def test_GetDataFrame_NoLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetDataFrame(False).shape
        expected = (6553, 128)
        self.assertEqual(expected, actual)
    # Band-power expectations below are regression values captured from the
    # fixture; compared with 7-decimal tolerance via the helper.
    def test_GetAverageBandpower(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetAverageBandpower()
        expected = {'Alpha': 0.046372396504643934, 'Beta': 0.021799368301619663, 'Delta': 0.3797795190319582, 'Gamma': 0.015256991787747547, 'Theta': 0.0961496475016523}
        UnitTestHelper.assert_dict_almost_equal(actual, expected)
    def test_GetAverageBandpower_CustomBands(self):
        tested = self.GetMockEegSample(True)
        customBands = {'0-10': (0, 10), '10-20': (10, 20), '20-30': (20, 30), '30-40': (30, 40), '40-50': (40, 50)}
        actual = tested.GetAverageBandpower(customBands)
        expected = {'0-10': 0.20105735820962242, '10-20': 0.02871198170077185, '20-30': 0.01866300650862033, '30-40': 0.01589383386036209, '40-50': 0.013470140130352085}
        UnitTestHelper.assert_dict_almost_equal(actual, expected)
    def test_GetAverageBandpowerAsDataFrame_NoLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetAverageBandpowerAsDataFrame(False)
        expected = pd.DataFrame({'Alpha': [0.046372396504643934], 'Beta': [0.021799368301619663], 'Delta': [0.3797795190319582], 'Gamma': [0.015256991787747547], 'Theta': [0.0961496475016523]})
        assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
    def test_GetAverageBandpowerAsDataFrame_WithLabels(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetAverageBandpowerAsDataFrame(True)
        expected = pd.DataFrame({
            'Alpha': [0.046372396504643934],
            'Beta': [0.021799368301619663],
            'Delta': [0.3797795190319582],
            'Gamma': [0.015256991787747547],
            'Theta': [0.0961496475016523],
            'Subject': ['testSubject'],
            'Session': ['testSession'],
            'Condition': ['testAwakeCondition'],
            'BinaryCondition': ['Conscious'],
            'TernaryCondition': ['Conscious']
        })
        assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
    def test_GetAverageChannelBandpower(self):
        tested = self.GetMockEegSample(True)
        actual = tested.GetAverageChannelBandpower("ECoG_ch001")
        expected = {'Alpha': 0.001581301582526992, 'Beta': 0.001105882882813178, 'Delta': 0.02409971332527757, 'Gamma': 0.0008023666358686522, 'Theta': 0.002751648980509086}
        UnitTestHelper.assert_dict_almost_equal(actual, expected)
def test_GetAverageBandpowerPerChannel(self):
tested = self.GetMockEegSample(False)
actual = tested.GetAverageBandpowerPerChannel()
with open("Test/test_GetAverageBandpowerPerChannel_Expected.json") as json_file:
expected = json.load(json_file)
UnitTestHelper.assert_dict_almost_equal(actual, expected)
def test_GetAverageBandpowerPerChannelAsDataFrame_NoLabels(self):
tested = self.GetMockEegSample(True)
actual = tested.GetAverageBandpowerPerChannelAsDataFrame()
with open("Test/test_GetAverageBandpowerPerChannel_Expected.json") as json_file:
expected = json.load(json_file)
expected = pd.DataFrame.from_dict(expected, orient='index')
assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
def test_GetAverageBandpowerPerChannelAsDataFrame_NoLabels2(self):
tested = self.GetMockEegSample(False)
actual = tested.GetAverageBandpowerPerChannelAsDataFrame()
with open("Test/test_GetAverageBandpowerPerChannel_Expected.json") as json_file:
expected = json.load(json_file)
expected = pd.DataFrame.from_dict(expected, orient='index')
assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
def test_GetAverageBandpowerPerChannelAsDataFrame_WithLabels(self):
tested = self.GetMockEegSample(True)
actual = tested.GetAverageBandpowerPerChannelAsDataFrame(True)
expected = pd.read_csv('Test/test_GetAverageBandpowerPerChannelAsDataFrame_WithLabels_Expected.csv')
assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
def test_GetAverageBandpowerPerChannelAsDataFrame_CustomBands(self):
customBands = {'0-10': (0, 10), '10-20': (10, 20), '20-30': (20, 30), '30-40': (30, 40), '40-50': (40, 50)}
tested = self.GetMockEegSample(True)
actual = tested.GetAverageBandpowerPerChannelAsDataFrame(True, customBands)
expected = pd.read_csv('Test/test_GetAverageBandpowerPerChannelAsDataFrame_CustomBands.csv')
assert_frame_equal(expected.sort_index(axis=1), actual.sort_index(axis=1), check_dtype=False)
def test_SplitEvenly(self):
tested = self.GetMockEegSample(True)
tested = tested.SplitEvenly(10)
self.assertEqual(10, len(tested))
def test_SplitEvenly_OneSlice(self):
tested = self.GetMockEegSample(True)
tested = tested.SplitEvenly(1)
self.assertEqual(1, len(tested))
def test_SplitEvenly_ZeroSlices(self):
tested = self.GetMockEegSample(True)
self.assertRaises(ValueError, tested.SplitEvenly, 0)
def test_SplitEvenly_MoreSlicesThanRows(self):
tested = self.GetMockEegSample(True)
rowsNo = len(tested.dataFrame)
self.assertRaises(ValueError, tested.SplitEvenly, rowsNo + 1)
def test_GenerateEegBands_Step10(self):
step = 10
actual = eeg.EegSample.GenerateEegBands(step)
expected = {'0-10': (0, 10), '10-20': (10, 20), '20-30': (20, 30), '30-40': (30, 40), '40-50': (40, 50)}
self.assertDictEqual(expected, actual)
def test_GenerateEegBands_Step1(self):
step = 1
actual = eeg.EegSample.GenerateEegBands(step)
self.maxDiff = None
expected = {'0-1': (0, 1),
'1-2': (1, 2),
'10-11': (10, 11),
'11-12': (11, 12),
'12-13': (12, 13),
'13-14': (13, 14),
'14-15': (14, 15),
'15-16': (15, 16),
'16-17': (16, 17),
'17-18': (17, 18),
'18-19': (18, 19),
'19-20': (19, 20),
'2-3': (2, 3),
'20-21': (20, 21),
'21-22': (21, 22),
'22-23': (22, 23),
'23-24': (23, 24),
'24-25': (24, 25),
'25-26': (25, 26),
'26-27': (26, 27),
'27-28': (27, 28),
'28-29': (28, 29),
'29-30': (29, 30),
'3-4': (3, 4),
'30-31': (30, 31),
'31-32': (31, 32),
'32-33': (32, 33),
'33-34': (33, 34),
'34-35': (34, 35),
'35-36': (35, 36),
'36-37': (36, 37),
'37-38': (37, 38),
'38-39': (38, 39),
'39-40': (39, 40),
'4-5': (4, 5),
'40-41': (40, 41),
'41-42': (41, 42),
'42-43': (42, 43),
'43-44': (43, 44),
'44-45': (44, 45),
'5-6': (5, 6),
'6-7': (6, 7),
'7-8': (7, 8),
'8-9': (8, 9),
'9-10': (9, 10)}
self.assertDictEqual(expected, actual)
class Test_Directory(unittest.TestCase):
def test_Ctor(self):
path = "Test"
tested = eeg.Directory(path)
self.assertEqual(tested.fullPath, path)
def test_ctor_ThrowsWhenDirDoesntExist(self):
with self.assertRaises(ValueError):
eeg.Directory("D:\DirectoryThatDoesNotExist")
def test_EnumerateFiles_WithDot(self):
path = "Test\DirectoryForDirectoryClassTests"
extension = ".csv"
tested = eeg.Directory(path)
actual = tested.EnumerateFiles(extension)
expected = [os.path.join(path, f"fileB{extension}")]
self.assertEqual(expected, actual)
def test_EnumerateFiles_WithoutDot(self):
path = | |
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import filecmp
import os
import re
import shutil
import stat
import sys
import urllib2
from color import Coloring
from git_command import GitCommand
from git_config import GitConfig, IsId
from error import GitError, ImportError, UploadError
from error import ManifestInvalidRevisionError
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
def _lwrite(path, content):
lock = '%s.lock' % path
fd = open(lock, 'wb')
try:
fd.write(content)
finally:
fd.close()
try:
os.rename(lock, path)
except OSError:
os.remove(lock)
raise
def _error(fmt, *args):
  """Write a %-formatted message to stderr, prefixed with 'error: '."""
  sys.stderr.write('error: %s\n' % (fmt % args))
def not_rev(r):
  """Return the rev-list exclusion form ("^<rev>") of *r*."""
  return '^%s' % r
def sq(r):
  """Single-quote *r* for safe use on a POSIX shell command line.

  Embedded single quotes are escaped with the standard '\\'' idiom
  (close quote, escaped quote, reopen quote).  The previous code wrote
  the replacement as "'\\''" without doubling the backslash, which Python
  collapses to three quote characters and silently drops the quote from
  the quoted string (sq("don't") produced "'don'''t'" -> shell "dont").
  """
  return "'" + r.replace("'", "'\\''") + "'"
hook_list = None
def repo_hooks():
  """Return (and cache in the module-global hook_list) the absolute
  paths of the hook scripts bundled next to this module."""
  global hook_list
  if hook_list is None:
    hooks_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             'hooks')
    hook_list = [os.path.join(hooks_dir, name)
                 for name in os.listdir(hooks_dir)]
  return hook_list
def relpath(dst, src):
  """Return *dst* expressed relative to the directory containing *src*.

  Walks up from src's directory to the common prefix, emitting one
  '../' per level, then appends the remainder of dst.
  """
  src = os.path.dirname(src)
  top = os.path.commonprefix([dst, src])
  if top.endswith('/'):
    top = top[:-1]
  else:
    top = os.path.dirname(top)

  rel = ''
  walk = src
  while walk != top:
    rel += '../'
    walk = os.path.dirname(walk)
  return rel + dst[len(top) + 1:]
class DownloadedChange(object):
  """A Gerrit change/patch-set that has been fetched into the repository."""

  _commit_cache = None  # lazily computed rev-list output

  def __init__(self, project, base, change_id, ps_id, commit):
    self.project = project
    self.base = base
    self.change_id = change_id
    self.ps_id = ps_id
    self.commit = commit

  @property
  def commits(self):
    """One-line summaries of the commits the change adds over its base."""
    if self._commit_cache is None:
      self._commit_cache = self.project.bare_git.rev_list(
          '--abbrev=8',
          '--abbrev-commit',
          '--pretty=oneline',
          '--reverse',
          '--date-order',
          not_rev(self.base),
          self.commit,
          '--')
    return self._commit_cache
class ReviewableBranch(object):
  """A local topic branch that can be uploaded for code review."""

  _commit_cache = None  # lazily computed rev-list output

  def __init__(self, project, branch, base):
    self.project = project
    self.branch = branch
    self.base = base
    self.replace_changes = None

  @property
  def name(self):
    """Short branch name (without the refs/heads/ prefix)."""
    return self.branch.name

  @property
  def commits(self):
    """Cached one-line summaries of commits not yet in the upstream base."""
    if self._commit_cache is None:
      self._commit_cache = self.project.bare_git.rev_list(
          '--abbrev=8',
          '--abbrev-commit',
          '--pretty=oneline',
          '--reverse',
          '--date-order',
          not_rev(self.base),
          R_HEADS + self.name,
          '--')
    return self._commit_cache

  @property
  def unabbrev_commits(self):
    """Map of 8-character abbreviated SHA-1 -> full SHA-1 for the branch."""
    full = dict()
    for commit in self.project.bare_git.rev_list(not_rev(self.base),
                                                 R_HEADS + self.name,
                                                 '--'):
      full[commit[0:8]] = commit
    return full

  @property
  def date(self):
    """Commit date of the branch head, as formatted by `git log %cd`."""
    return self.project.bare_git.log('--pretty=format:%cd',
                                     '-n', '1',
                                     R_HEADS + self.name,
                                     '--')

  def UploadForReview(self, people):
    """Send this branch to the review server, notifying *people*."""
    self.project.UploadForReview(self.name,
                                 self.replace_changes,
                                 people)

  def GetPublishedRefs(self):
    """Return {sha: ref} for changes already on the review server."""
    refs = {}
    output = self.project.bare_git.ls_remote(
        self.branch.remote.SshReviewUrl(self.project.UserEmail),
        'refs/changes/*')
    for line in output.split('\n'):
      try:
        (sha, ref) = line.split()
        refs[sha] = ref
      except ValueError:
        pass  # ignore malformed / blank lines
    return refs
class StatusColoring(Coloring):
  """Colour palette used by the `repo status` output."""

  def __init__(self, config):
    Coloring.__init__(self, config, 'status')
    # headers are bold; problem states are red, new content is green
    self.project = self.printer('header', attr='bold')
    self.branch = self.printer('header', attr='bold')
    self.nobranch = self.printer('nobranch', fg='red')
    self.important = self.printer('important', fg='red')
    self.added = self.printer('added', fg='green')
    self.changed = self.printer('changed', fg='red')
    self.untracked = self.printer('untracked', fg='red')
class DiffColoring(Coloring):
  """Colour palette used by the `repo diff` output."""

  def __init__(self, config):
    Coloring.__init__(self, config, 'diff')
    self.project = self.printer('header', attr='bold')
class _CopyFile:
def __init__(self, src, dest, abssrc, absdest):
self.src = src
self.dest = dest
self.abs_src = abssrc
self.abs_dest = absdest
def _Copy(self):
src = self.abs_src
dest = self.abs_dest
# copy file if it does not exist or is out of date
if not os.path.exists(dest) or not filecmp.cmp(src, dest):
try:
# remove existing file first, since it might be read-only
if os.path.exists(dest):
os.remove(dest)
shutil.copy(src, dest)
# make the file read-only
mode = os.stat(dest)[stat.ST_MODE]
mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.chmod(dest, mode)
except IOError:
_error('Cannot copy file %s to %s', src, dest)
class RemoteSpec(object):
  """Name, fetch URL and review server describing one git remote."""

  def __init__(self, name, url=None, review=None):
    self.name = name
    self.url = url
    self.review = review
class Project(object):
  def __init__(self,
               manifest,
               name,
               remote,
               gitdir,
               worktree,
               relpath,
               revisionExpr,
               revisionId):
    """Bind one manifest project entry to its on-disk git state.

    manifest     -- owning manifest object (supplies globalConfig)
    name         -- project name as given in the manifest
    remote       -- RemoteSpec describing where to fetch from
    gitdir       -- path of the bare .git repository
    worktree     -- path of the checked out work tree (may be None)
    relpath      -- work tree path relative to the manifest root
    revisionExpr -- revision as written in the manifest
    revisionId   -- resolved commit id, if already known
    """
    self.manifest = manifest
    self.name = name
    self.remote = remote
    self.gitdir = gitdir
    self.worktree = worktree
    self.relpath = relpath
    self.revisionExpr = revisionExpr
    # A manifest revision that is itself a commit id doubles as the
    # resolved revision; otherwise trust the caller-supplied id (if any).
    if revisionId is None \
        and revisionExpr \
        and IsId(revisionExpr):
      self.revisionId = revisionExpr
    else:
      self.revisionId = revisionId
    self.snapshots = {}
    self.copyfiles = []
    self.config = GitConfig.ForRepository(
                    gitdir = self.gitdir,
                    defaults = self.manifest.globalConfig)
    # work_git runs git in the work tree; bare_git in the bare repository.
    if self.worktree:
      self.work_git = self._GitGetByExec(self, bare=False)
    else:
      self.work_git = None
    self.bare_git = self._GitGetByExec(self, bare=True)
    self.bare_ref = GitRefs(gitdir)
@property
def Exists(self):
return os.path.isdir(self.gitdir)
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD.
"""
b = self.work_git.GetHead()
if b.startswith(R_HEADS):
return b[len(R_HEADS):]
return None
def IsRebaseInProgress(self):
w = self.worktree
g = os.path.join(w, '.git')
return os.path.exists(os.path.join(g, 'rebase-apply')) \
or os.path.exists(os.path.join(g, 'rebase-merge')) \
or os.path.exists(os.path.join(w, '.dotest'))
def IsDirty(self, consider_untracked=True):
"""Is the working directory modified in some way?
"""
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
if self.work_git.DiffZ('diff-index','-M','--cached',HEAD):
return True
if self.work_git.DiffZ('diff-files'):
return True
if consider_untracked and self.work_git.LsOthers():
return True
return False
_userident_name = None
_userident_email = None
@property
def UserName(self):
"""Obtain the user's personal name.
"""
if self._userident_name is None:
self._LoadUserIdentity()
return self._userident_name
@property
def UserEmail(self):
"""Obtain the user's email address. This is very likely
to be their Gerrit login.
"""
if self._userident_email is None:
self._LoadUserIdentity()
return self._userident_email
def _LoadUserIdentity(self):
u = self.bare_git.var('GIT_COMMITTER_IDENT')
m = re.compile("^(.*) <([^>]*)> ").match(u)
if m:
self._userident_name = m.group(1)
self._userident_email = m.group(2)
else:
self._userident_name = ''
self._userident_email = ''
def GetRemote(self, name):
"""Get the configuration for a single remote.
"""
return self.config.GetRemote(name)
def GetBranch(self, name):
"""Get the configuration for a single branch.
"""
return self.config.GetBranch(name)
def GetBranches(self):
"""Get all existing local branches.
"""
current = self.CurrentBranch
all = self._allrefs
heads = {}
pubd = {}
for name, id in all.iteritems():
if name.startswith(R_HEADS):
name = name[len(R_HEADS):]
b = self.GetBranch(name)
b.current = name == current
b.published = None
b.revision = id
heads[name] = b
for name, id in all.iteritems():
if name.startswith(R_PUB):
name = name[len(R_PUB):]
b = heads.get(name)
if b:
b.published = id
return heads
## Status Display ##
  def PrintWorkTreeStatus(self):
    """Prints the status of the repository to stdout.

    Returns 'CLEAN' when there is nothing to report, 'DIRTY' after
    printing per-path status lines, and None when the work tree is
    missing entirely.
    """
    if not os.path.isdir(self.worktree):
      # work tree was never checked out (or has been deleted)
      print ''
      print 'project %s/' % self.relpath
      print ' missing (run "repo sync")'
      return

    # refresh the index so diff-index/diff-files see current stat info
    self.work_git.update_index('-q',
                               '--unmerged',
                               '--ignore-missing',
                               '--refresh')
    rb = self.IsRebaseInProgress()
    di = self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD)  # staged
    df = self.work_git.DiffZ('diff-files')                          # unstaged
    do = self.work_git.LsOthers()                                   # untracked
    if not rb and not di and not df and not do:
      return 'CLEAN'

    out = StatusColoring(self.config)
    out.project('project %-40s', self.relpath + '/')

    branch = self.CurrentBranch
    if branch is None:
      out.nobranch('(*** NO BRANCH ***)')
    else:
      out.branch('branch %s', branch)
    out.nl()

    if rb:
      out.important('prior sync failed; rebase still in progress')
      out.nl()

    # merge the three path sets into one sorted listing
    paths = list()
    paths.extend(di.keys())
    paths.extend(df.keys())
    paths.extend(do)
    paths = list(set(paths))
    paths.sort()

    for p in paths:
      try: i = di[p]
      except KeyError: i = None

      try: f = df[p]
      except KeyError: f = None

      # two status letters: staged (upper case), unstaged (lower case)
      if i: i_status = i.status.upper()
      else: i_status = '-'

      if f: f_status = f.status.lower()
      else: f_status = '-'

      # renames/copies carry the source path and similarity level
      if i and i.src_path:
        line = ' %s%s\t%s => %s (%s%%)' % (i_status, f_status,
                                           i.src_path, p, i.level)
      else:
        line = ' %s%s\t%s' % (i_status, f_status, p)

      if i and not f:
        out.added('%s', line)
      elif (i and f) or (not i and f):
        out.changed('%s', line)
      elif not i and not f:
        out.untracked('%s', line)
      else:
        out.write('%s', line)
      out.nl()
    return 'DIRTY'
  def PrintWorkTreeDiff(self):
    """Prints the diff of the work tree against HEAD to stdout.

    (The previous docstring said "status" — it was copy-pasted from
    PrintWorkTreeStatus; this method runs `git diff HEAD --`.)
    """
    out = DiffColoring(self.config)
    cmd = ['diff']
    if out.is_on:
      cmd.append('--color')
    cmd.append(HEAD)
    cmd.append('--')
    p = GitCommand(self,
                   cmd,
                   capture_stdout = True,
                   capture_stderr = True)
    has_diff = False
    for line in p.process.stdout:
      if not has_diff:
        # print the project banner once, and only when there is output
        out.nl()
        out.project('project %s/' % self.relpath)
        out.nl()
        has_diff = True
      print line[:-1]  # strip the trailing newline; print adds its own
    p.Wait()
## Publish / Upload ##
def WasPublished(self, branch, all=None):
"""Was the branch published (uploaded) for code review?
If so, returns the SHA-1 hash of the last published
state for the branch.
"""
key = R_PUB + branch
if all is None:
try:
return self.bare_git.rev_parse(key)
except GitError:
return None
else:
try:
return all[key]
except KeyError:
return None
def CleanPublishedCache(self, all=None):
"""Prunes any stale published refs.
"""
if all is None:
all = self._allrefs
heads = set()
canrm = {}
for name, id in all.iteritems():
if name.startswith(R_HEADS):
heads.add(name)
elif name.startswith(R_PUB):
canrm[name] = id
for name, id in canrm.iteritems():
n = name[len(R_PUB):]
if R_HEADS + n not in heads:
self.bare_git.DeleteRef(name, id)
def | |
""" Decoder for the SQL generation problem."""
from collections import namedtuple, defaultdict
import numpy as np
import torch
import torch.nn.functional as F
from . import torch_utils
from .token_predictor import PredictionInput, PredictionInputWithSchema
import EditSQL.data_util.snippets as snippet_handler
from . import embedder
from EditSQL.data_util.vocabulary import EOS_TOK, UNK_TOK
from MISP_SQL.utils import SELECT_AGG_v2, WHERE_COL, WHERE_OP, WHERE_ROOT_TERM, GROUP_COL, HAV_AGG_v2, \
HAV_OP_v2, HAV_ROOT_TERM_v2, ORDER_AGG_v2, ORDER_DESC_ASC, ORDER_LIMIT, IUEN_v2, OUTSIDE, END_NESTED, \
helper_find_closest_bw
from MISP_SQL.utils import Hypothesis as BaseHypothesis
from EditSQL.eval_scripts.evaluation import AGG_OPS, ORDER_OPS
# Comparison operators and set operations in the order the decoder indexes them
# (positions are used as operator ids in the generated tag sequences).
NEW_WHERE_OPS = ('=','>','<','>=','<=','!=','like','not in','in','between', 'not like')
NEW_SQL_OPS = ('none','intersect', 'union', 'except')
class Hypothesis(BaseHypothesis):
    """Decoding hypothesis that also carries decoder state and SQL tokens."""

    def __init__(self, dec_prefix, decoder_states, decoder_input):
        BaseHypothesis.__init__(self, dec_prefix)
        self.sql = []              # SQL tokens generated so far
        self.keyword = None        # current clause keyword (select/where/...)
        self.nested_keywords = []  # keyword stack for nested queries
        # user feedback carried forward to later decisions:
        # lists of dicts of {semantic_tag: avoid/confirmed item list}
        self.avoid_items, self.confirmed_items = [], []
        # decoder recurrent state and the next-step input vector
        self.decoder_states = decoder_states
        self.decoder_input = decoder_input

    @staticmethod
    def print_hypotheses(hypotheses):
        """Debug-print logprob, SQL and tag/dec sequences of each hypothesis."""
        for hyp in hypotheses:
            print("logprob: {}, sql: {}\ntag_seq: {}\ndec_seq: {}".format(
                hyp.logprob, hyp.sql, hyp.tag_seq, hyp.dec_seq))
def flatten_distribution(distribution_map, probabilities):
    """ Flattens a probability distribution given a map of "unique" values.

    Every value appearing more than once in distribution_map ends up with
    a single entry holding the sum of its probabilities.

    Arguments:
        distribution_map (list of str): Values to get the probability for.
        probabilities (np.ndarray): Probabilities aligned with
            distribution_map.

    Returns:
        (list, list/np.ndarray): deduplicated map and merged probabilities;
        the inputs are returned unchanged when there are no duplicates.
    """
    assert len(distribution_map) == len(probabilities)

    if len(distribution_map) == len(set(distribution_map)):
        # nothing duplicated: pass both through untouched
        return distribution_map, probabilities

    # find the position of the first duplicated token
    idx_first_dup = 0
    seen_set = set()
    for i, tok in enumerate(distribution_map):
        if tok in seen_set:
            idx_first_dup = i
            break
        seen_set.add(tok)

    # keep the duplicate-free prefix as-is and append the remaining
    # unique tokens (set-difference order, as before)
    new_dist_map = distribution_map[:idx_first_dup] + list(
        set(distribution_map) - set(distribution_map[:idx_first_dup]))
    assert len(new_dist_map) == len(set(new_dist_map))

    new_probs = np.array(
        probabilities[:idx_first_dup] +
        [0.] * (len(set(distribution_map)) - idx_first_dup))
    assert len(new_probs) == len(new_dist_map)

    # fold the probability of every remaining token onto its slot
    for i, token_name in enumerate(distribution_map[idx_first_dup:]):
        if token_name not in new_dist_map:
            new_dist_map.append(token_name)
        new_index = new_dist_map.index(token_name)
        new_probs[new_index] += probabilities[i + idx_first_dup]
    new_probs = new_probs.tolist()

    assert len(new_dist_map) == len(new_probs)
    return new_dist_map, new_probs
class SQLPrediction(namedtuple('SQLPrediction',
                               ('predictions',
                                'sequence',
                                'probability'))):
    """Decoder output: per-step predictions, token sequence, probability."""
    __slots__ = ()

    def __str__(self):
        return "%s\t%s" % (self.probability, " ".join(self.sequence))
class SequencePredictorWithSchema(torch.nn.Module):
""" Predicts a sequence.
Attributes:
lstms (list of dy.RNNBuilder): The RNN used.
token_predictor (TokenPredictor): Used to actually predict tokens.
"""
    def __init__(self,
                 params,
                 input_size,
                 output_embedder,
                 column_name_token_embedder,
                 token_predictor):
        """Build the decoder LSTM stack and keep the embedding helpers.

        params -- experiment configuration (decoder sizes, attention flags)
        input_size -- size of the per-step decoder input vector
        output_embedder -- embedder for output-vocabulary tokens
        column_name_token_embedder -- embedder for schema column tokens
        token_predictor -- module scoring candidate next tokens
        """
        super().__init__()
        self.lstms = torch_utils.create_multilayer_lstm_params(params.decoder_num_layers, input_size, params.decoder_state_size, "LSTM-d")
        self.token_predictor = token_predictor
        self.output_embedder = output_embedder
        self.column_name_token_embedder = column_name_token_embedder
        # learned embedding standing in for the "previous token" at step 0
        self.start_token_embedding = torch_utils.add_params((params.output_embedding_size,), "y-0")
        self.input_size = input_size
        self.params = params
def _initialize_decoder_lstm(self, encoder_state):
decoder_lstm_states = []
for i, lstm in enumerate(self.lstms):
encoder_layer_num = 0
if len(encoder_state[0]) > 1:
encoder_layer_num = i
# check which one is h_0, which is c_0
c_0 = encoder_state[0][encoder_layer_num].view(1,-1)
h_0 = encoder_state[1][encoder_layer_num].view(1,-1)
decoder_lstm_states.append((h_0, c_0))
return decoder_lstm_states
    def get_output_token_embedding(self, output_token, input_schema, snippets):
        """Embed *output_token* for feeding back into the decoder.

        Snippet tokens are embedded as a bag of words over the snippet;
        schema column names use the schema's surface-form embedder; all
        other tokens use the output vocabulary embedder (falling back to
        UNK, with a warning, for out-of-vocabulary tokens).
        """
        if self.params.use_snippets and snippet_handler.is_snippet(output_token):
            output_token_embedding = embedder.bow_snippets(output_token, snippets, self.output_embedder, input_schema)
        else:
            if input_schema:
                try:
                    assert self.output_embedder.in_vocabulary(output_token) or input_schema.in_vocabulary(output_token, surface_form=True)
                    if self.output_embedder.in_vocabulary(output_token):
                        output_token_embedding = self.output_embedder(output_token)
                    else:
                        output_token_embedding = input_schema.column_name_embedder(output_token, surface_form=True)
                except AssertionError:
                    # token is neither in the output vocabulary nor the schema
                    print("\nWARNING: output_token '{}' is not found in vocabulary!".format(output_token))
                    output_token_embedding = self.output_embedder(output_token) # will turn to UNK
            else:
                output_token_embedding = self.output_embedder(output_token)
        return output_token_embedding
def get_decoder_input(self, output_token_embedding, prediction):
if self.params.use_schema_attention and self.params.use_query_attention:
decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector, prediction.schema_attention_results.vector, prediction.query_attention_results.vector], dim=0)
elif self.params.use_schema_attention:
decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector, prediction.schema_attention_results.vector], dim=0)
else:
decoder_input = torch.cat([output_token_embedding, prediction.utterance_attention_results.vector], dim=0)
return decoder_input
    def forward(self,
                final_encoder_state,
                encoder_states,
                schema_states,
                max_generation_length,
                snippets=None,
                gold_sequence=None,
                input_sequence=None,
                previous_queries=None,
                previous_query_states=None,
                input_schema=None,
                dropout_amount=0.):
        """ Generates a sequence.

        With *gold_sequence* the decoder is teacher-forced and consumes
        the gold tokens; otherwise it decodes greedily until EOS or
        *max_generation_length* steps.  Returns a SQLPrediction of
        (per-step predictions, token sequence, sequence probability).
        """
        index = 0

        context_vector_size = self.input_size - self.params.output_embedding_size

        # Decoder states: just the initialized decoder.
        # Current input to decoder: phi(start_token) ; zeros the size of the
        # context vector
        predictions = []
        sequence = []
        probability = 1.

        decoder_states = self._initialize_decoder_lstm(final_encoder_state)

        if self.start_token_embedding.is_cuda:
            decoder_input = torch.cat([self.start_token_embedding, torch.cuda.FloatTensor(context_vector_size).fill_(0)], dim=0)
        else:
            decoder_input = torch.cat([self.start_token_embedding, torch.zeros(context_vector_size)], dim=0)

        continue_generating = True
        while continue_generating:
            if len(sequence) == 0 or sequence[-1] != EOS_TOK:
                # one LSTM step, then score candidate next tokens
                _, decoder_state, decoder_states = torch_utils.forward_one_multilayer(self.lstms, decoder_input, decoder_states, dropout_amount)
                prediction_input = PredictionInputWithSchema(decoder_state=decoder_state,
                                                             input_hidden_states=encoder_states,
                                                             schema_states=schema_states,
                                                             snippets=snippets,
                                                             input_sequence=input_sequence,
                                                             previous_queries=previous_queries,
                                                             previous_query_states=previous_query_states,
                                                             input_schema=input_schema)
                prediction = self.token_predictor(prediction_input, dropout_amount=dropout_amount)

                predictions.append(prediction)

                if gold_sequence:
                    # teacher forcing: consume the gold token directly
                    output_token = gold_sequence[index]
                    output_token_embedding = self.get_output_token_embedding(output_token, input_schema, snippets)
                    decoder_input = self.get_decoder_input(output_token_embedding, prediction)

                    sequence.append(gold_sequence[index])

                    if index >= len(gold_sequence) - 1:
                        continue_generating = False
                else:
                    # greedy decoding over the (possibly copy-augmented) vocabulary
                    assert prediction.scores.dim() == 1
                    probabilities = F.softmax(prediction.scores, dim=0).cpu().data.numpy().tolist()

                    distribution_map = prediction.aligned_tokens
                    assert len(probabilities) == len(distribution_map)

                    if self.params.use_previous_query and self.params.use_copy_switch and len(previous_queries) > 0:
                        # mix generation and copy distributions via the copy switch
                        assert prediction.query_scores.dim() == 1
                        query_token_probabilities = F.softmax(prediction.query_scores, dim=0).cpu().data.numpy().tolist()

                        query_token_distribution_map = prediction.query_tokens

                        assert len(query_token_probabilities) == len(query_token_distribution_map)

                        copy_switch = prediction.copy_switch.cpu().data.numpy()

                        # Merge the two
                        probabilities = ((np.array(probabilities) * (1 - copy_switch)).tolist() +
                                         (np.array(query_token_probabilities) * copy_switch).tolist()
                                         )
                        distribution_map = distribution_map + query_token_distribution_map
                        assert len(probabilities) == len(distribution_map)

                    # Get a new probabilities and distribution_map consolidating duplicates
                    distribution_map, probabilities = flatten_distribution(distribution_map, probabilities)

                    # Modify the probability distribution so that the UNK token can never be produced
                    probabilities[distribution_map.index(UNK_TOK)] = 0.
                    argmax_index = int(np.argmax(probabilities))

                    argmax_token = distribution_map[argmax_index]
                    sequence.append(argmax_token)

                    output_token_embedding = self.get_output_token_embedding(argmax_token, input_schema, snippets)
                    decoder_input = self.get_decoder_input(output_token_embedding, prediction)

                    probability *= probabilities[argmax_index]

                    continue_generating = False
                    if index < max_generation_length and argmax_token != EOS_TOK:
                        continue_generating = True

            index += 1

        return SQLPrediction(predictions,
                             sequence,
                             probability)
def update_tag_seq(self, keyword, token_idx, token, prob, tag_seq, sql, dec_idx):
if token in {'max', 'min', 'count', 'sum', 'avg'}:
if keyword == "select":
tag = SELECT_AGG_v2
elif keyword == "order_by":
tag = ORDER_AGG_v2
elif keyword == "having":
tag = HAV_AGG_v2
else:
raise Exception("Agg {} is invalid with keyword {}!".format(token, keyword))
agg = (token, AGG_OPS.index(token))
su = (tag, None, agg, False, [prob], dec_idx)
tag_seq.append(su)
elif token == 'distinct':
assert keyword in {"select", "order_by", "having"}
if sql[-2] != '(': # only consider cases like "count ( distinct c1 )"
return tag_seq
assert tag_seq[-1][0] in {SELECT_AGG_v2, ORDER_AGG_v2, HAV_AGG_v2} and \
tag_seq[-1][1] is None
# revise unit
su = tag_seq[-1]
su = (su[0], None, su[2], True, su[4] + [prob], su[5])
tag_seq[-1] = su
elif token_idx >= len(self.token_predictor.vocabulary): # column
if "*" in token:
if "." in token:
tab_name, col_name = token.split('.')
else:
tab_name = None
col_name = "*"
col_idx = token_idx # 0; revised 01/30
else:
tab_name, col_name = token.split('.')
col_idx = token_idx #- len(self.token_predictor.vocabulary); revised 01/30
col = (tab_name, col_name, col_idx)
if keyword in {"select", "order_by", "having"}:
if len(tag_seq) and tag_seq[-1][0] in {SELECT_AGG_v2, ORDER_AGG_v2, HAV_AGG_v2} and \
tag_seq[-1][1] is None:
su = tag_seq[-1]
su = (su[0], col, su[2], su[3], su[4] + [prob], su[5])
tag_seq[-1] = su
else:
if keyword == "select":
tag = SELECT_AGG_v2
elif keyword == "order_by":
tag = ORDER_AGG_v2
else:
assert keyword == "having"
tag = HAV_AGG_v2
su = (tag, col, ("none_agg", AGG_OPS.index("none")), False, [prob], dec_idx)
tag_seq.append(su)
else:
if keyword == "where":
tag = WHERE_COL
elif keyword == "group_by":
tag = GROUP_COL
else:
raise Exception("Col {} is invalid with keyword {}!".format(token, keyword))
su = (tag, col, prob, dec_idx)
tag_seq.append(su)
elif token in list(NEW_WHERE_OPS) + ['not']: # ('=','>','<','>=','<=','!=','like','not in','in','between', 'not like')
if (token == "=" and sql[-2] in {'<', '>'}) or \
(token == "in" and sql[-2] == "not") or \
(token == "like" and sql[-2] == "not"):
assert tag_seq[-1][0] in {WHERE_OP, HAV_OP_v2}
if token == "=":
op_name = "".join(sql[-2:])
else:
op_name = " ".join(sql[-2:])
op = (op_name, NEW_WHERE_OPS.index(op_name))
su = tag_seq[-1]
avg_prob = np.exp((np.log(prob) + np.log(su[3])) / 2)
tag_seq[-1] = (su[0], su[1], op, avg_prob, su[4])
else:
if keyword == "where":
tag = WHERE_OP
assert tag_seq[-1][0] == WHERE_COL
col_agg = (tag_seq[-1][1],)
else:
assert keyword == "having"
tag = HAV_OP_v2
assert tag_seq[-1][0] == HAV_AGG_v2
col_agg = (tag_seq[-1][1], tag_seq[-1][2], tag_seq[-1][3])
if token == 'not':
op = None
else:
op = (token, NEW_WHERE_OPS.index(token))
su = (tag, col_agg, op, prob, dec_idx)
tag_seq.append(su)
elif token == "value":
if keyword == "where":
op_tag = WHERE_OP
tag = WHERE_ROOT_TERM
else:
assert keyword == "having"
op_tag = HAV_OP_v2
tag = HAV_ROOT_TERM_v2
op_pos = helper_find_closest_bw(tag_seq, len(tag_seq) - 1, tgt_name=op_tag)
assert op_pos != -1
if tag_seq[op_pos][2][0] == "between" and " ".join(sql[-4:]) == "between value and value":
return tag_seq
su = (tag, tag_seq[op_pos][1], tag_seq[op_pos][2], 'terminal', prob, dec_idx)
tag_seq.append(su)
elif token == "(":
if sql[-2] in {'max', 'min', 'count', 'sum', 'avg'}:
assert tag_seq[-1][0] in {SELECT_AGG_v2, ORDER_AGG_v2, HAV_AGG_v2} # "count ( c1 )"
su = tag_seq[-1]
su | |
Column('Date_Time_Stamp',
DateTime,
nullable=True,
doc='Date Time Stamp',
comment='N/A'),
Column('Cntr_Num',
String(length=10),
nullable=True,
index=True,
doc='Contract Number',
comment='The key to the contract record.'),
Column('Desc',
String(length=30),
nullable=True,
doc='Description',
comment='The description of this contract'),
Column('Eff_Date',
Date,
nullable=True,
doc='Effective Date',
comment='The date this contract becomes effective. If blank, date check is not used.'),
Column('Exp_Date',
Date,
nullable=True,
doc='Expiration Date',
comment='The date that this contract ends. If blank, this date check is not used and the'
'contract will never expire.'),
Column('Vend_Num',
String(length=7),
nullable=True,
index=True,
doc='Vendor Number',
comment='The vendor number that is associated with this contract'),
Column('Vend_Cntr_Num',
String(length=500),
nullable=True,
index=True,
doc='Vendor Contract Number',
comment='The vendor assigned contract number .'),
Column('Special_Pricing_Cntr',
String(length=16),
nullable=True,
doc='Special Pricing Contract',
comment='A Y/N Field to show "Above Item is on Special" for prices sales orders, ship'
'confirmation, order confirmation, and invoice copies.'),
Column('Fut_Vend_Num',
String(length=14),
nullable=True,
doc='Future Vendor Number',
comment='The future contract number assigned by the vendor.'),
Column('Fut_Vend_Cntr_Eff_Date',
Date,
nullable=True,
doc='Future Vender Contract Number Date',
comment='The date the future vendor contract number becomes effective.'),
Column('All_Cust_Flag',
String(length=13),
nullable=True,
doc='All Customer Flag',
comment='This will contain a Y if this contract applies to all customers.'),
Column('Web_Cntr',
String(length=12),
nullable=True,
doc='Web Contract',
comment='This field will contain a Y if this contract is used in Inform E-Commerce to'
'limit the products a customer can order.'),
schema='price.contract')
# noinspection PyPep8Naming
class cntr_header_02_archive(server_utils.mysql_base):
    """Archived contract-header rows (``price.contract`` schema).

    Same column layout as ``cntr_header_01_current``; holds archived
    copies of contract-header records.
    """
    # NOTE(review): several ``comment=`` values were built from adjacent
    # string literals concatenated without a separating space
    # ("...and the" + "contract..." -> "thecontract"); spaces restored below.
    __table__ = Table(
        'cntr_header_02_archive', server_utils.mysql_base.metadata,
        Column('ID', Integer, primary_key=True, unique=True, index=True,
               autoincrement=True, doc='ID', comment='N/A'),
        Column('Date_Time_Stamp', DateTime, nullable=True,
               doc='Date Time Stamp', comment='N/A'),
        Column('Cntr_Num', String(length=10), nullable=True, index=True,
               doc='Contract Number',
               comment='The key to the contract record.'),
        Column('Desc', String(length=30), nullable=True,
               doc='Description',
               comment='The description of this contract'),
        Column('Eff_Date', Date, nullable=True,
               doc='Effective Date',
               comment='The date this contract becomes effective. If blank, date check is not used.'),
        Column('Exp_Date', Date, nullable=True,
               doc='Expiration Date',
               comment='The date that this contract ends. If blank, this date check is not used and the '
                       'contract will never expire.'),
        Column('Vend_Num', String(length=7), nullable=True, index=True,
               doc='Vendor Number',
               comment='The vendor number that is associated with this contract'),
        Column('Vend_Cntr_Num', String(length=500), nullable=True, index=True,
               doc='Vendor Contract Number',
               comment='The vendor assigned contract number .'),
        Column('Special_Pricing_Cntr', String(length=16), nullable=True,
               doc='Special Pricing Contract',
               comment='A Y/N Field to show "Above Item is on Special" for prices sales orders, ship '
                       'confirmation, order confirmation, and invoice copies.'),
        Column('Fut_Vend_Num', String(length=14), nullable=True,
               doc='Future Vendor Number',
               comment='The future contract number assigned by the vendor.'),
        Column('Fut_Vend_Cntr_Eff_Date', Date, nullable=True,
               doc='Future Vender Contract Number Date',
               comment='The date the future vendor contract number becomes effective.'),
        Column('All_Cust_Flag', String(length=13), nullable=True,
               doc='All Customer Flag',
               comment='This will contain a Y if this contract applies to all customers.'),
        Column('Web_Cntr', String(length=12), nullable=True,
               doc='Web Contract',
               comment='This field will contain a Y if this contract is used in Inform E-Commerce to '
                       'limit the products a customer can order.'),
        schema='price.contract')
# noinspection PyPep8Naming
class cntr_shipto_01_current(server_utils.mysql_base):
    """Current contract ship-to assignments (``price.contract`` schema)."""
    # NOTE(review): spaces restored where adjacent ``comment=`` string
    # literals were concatenated without one ('"All' + 'Customers"').
    __table__ = Table(
        'cntr_shipto_01_current', server_utils.mysql_base.metadata,
        Column('ID', Integer, primary_key=True, unique=True, index=True,
               autoincrement=True, doc='ID', comment='N/A'),
        Column('Date_Time_Stamp', DateTime, nullable=True,
               doc='Date Time Stamp', comment='N/A'),
        Column('Cntr_Num', String(length=10), nullable=True, index=True,
               doc='Contract Number',
               comment='The key to the contract record.'),
        Column('Cust_Nums', String(length=8), nullable=True, index=True,
               doc='Customer Numbers',
               comment='A list of customers assigned to this contract. If "All Customers" is Y, then '
                       'this field will be blank.'),
        Column('Ship_To_Nums', String(length=8), nullable=True, index=True,
               doc='Ship To Numbers',
               comment='A list of the ship to numbers that are assigned to this contract. If "All '
                       'Customers" is Y, or this contract applies to all ship tos, then this field will '
                       'be blank'),
        schema='price.contract')

    @hybrid_property
    def Cust_Num_ShipTo_Combo(self):
        """Combined key: ``"<cust>_<shipto>"``; ``"<cust>_All"`` when no
        ship-to is set; ``None`` when both are empty.

        Raises:
            ValueError: a ship-to without a customer is inconsistent data.
        """
        # NOTE(review): the original compared Column objects
        # (self.__table__.c.*) against (None, "") at instance level, which
        # builds SQL expressions instead of reading the row's values; use
        # the mapped instance attributes instead.
        cust = self.Cust_Nums
        ship = self.Ship_To_Nums
        if cust not in (None, ""):
            if ship not in (None, ""):
                return cust + "_" + ship
            return cust + "_All"
        if ship not in (None, ""):
            raise ValueError
        return None

    # noinspection PyMethodParameters
    @Cust_Num_ShipTo_Combo.expression
    def Cust_Num_ShipTo_Combo(cls):
        # SQL-side equivalent of the property above (both-empty -> "",
        # inconsistent data -> "ERROR" instead of raising).
        return case([
            (and_(cls.__table__.c.Cust_Nums.isnot(None), cls.__table__.c.Cust_Nums != ""),
             func.IF(and_(cls.__table__.c.Ship_To_Nums.isnot(None), cls.__table__.c.Ship_To_Nums != ""),
                     func.concat(cls.__table__.c.Cust_Nums, literal("_"), cls.__table__.c.Ship_To_Nums),
                     func.concat(cls.__table__.c.Cust_Nums, literal("_All")))),
            (or_(cls.__table__.c.Cust_Nums.is_(None), cls.__table__.c.Cust_Nums == ""),
             func.IF(or_(cls.__table__.c.Ship_To_Nums.is_(None), cls.__table__.c.Ship_To_Nums == ""),
                     literal(""),
                     literal("ERROR")))
        ], else_=literal("ERROR"))
# noinspection PyPep8Naming
class cntr_shipto_02_archive(server_utils.mysql_base):
    """Archived contract ship-to assignments (``price.contract`` schema)."""
    # NOTE(review): spaces restored where adjacent ``comment=`` string
    # literals were concatenated without one ('"All' + 'Customers"').
    __table__ = Table(
        'cntr_shipto_02_archive', server_utils.mysql_base.metadata,
        Column('ID', Integer, primary_key=True, unique=True, index=True,
               autoincrement=True, doc='ID', comment='N/A'),
        Column('Date_Time_Stamp', DateTime, nullable=True,
               doc='Date Time Stamp', comment='N/A'),
        Column('Cntr_Num', String(length=10), nullable=True, index=True,
               doc='Contract Number',
               comment='The key to the contract record.'),
        Column('Cust_Nums', String(length=8), nullable=True, index=True,
               doc='Customer Numbers',
               comment='A list of customers assigned to this contract. If "All Customers" is Y, then '
                       'this field will be blank.'),
        Column('Ship_To_Nums', String(length=8), nullable=True, index=True,
               doc='Ship To Numbers',
               comment='A list of the ship to numbers that are assigned to this contract. If "All '
                       'Customers" is Y, or this contract applies to all ship tos, then this field will '
                       'be blank'),
        schema='price.contract')

    @hybrid_property
    def Cust_Num_ShipTo_Combo(self):
        """Combined key: ``"<cust>_<shipto>"``; ``"<cust>_All"`` when no
        ship-to is set; ``None`` when both are empty.

        Raises:
            ValueError: a ship-to without a customer is inconsistent data.
        """
        # NOTE(review): the original compared Column objects
        # (self.__table__.c.*) against (None, "") at instance level, which
        # builds SQL expressions instead of reading the row's values; use
        # the mapped instance attributes instead.
        cust = self.Cust_Nums
        ship = self.Ship_To_Nums
        if cust not in (None, ""):
            if ship not in (None, ""):
                return cust + "_" + ship
            return cust + "_All"
        if ship not in (None, ""):
            raise ValueError
        return None

    # noinspection PyMethodParameters
    @Cust_Num_ShipTo_Combo.expression
    def Cust_Num_ShipTo_Combo(cls):
        # SQL-side equivalent of the property above (both-empty -> "",
        # inconsistent data -> "ERROR" instead of raising).
        return case([
            (and_(cls.__table__.c.Cust_Nums.isnot(None), cls.__table__.c.Cust_Nums != ""),
             func.IF(and_(cls.__table__.c.Ship_To_Nums.isnot(None), cls.__table__.c.Ship_To_Nums != ""),
                     func.concat(cls.__table__.c.Cust_Nums, literal("_"), cls.__table__.c.Ship_To_Nums),
                     func.concat(cls.__table__.c.Cust_Nums, literal("_All")))),
            (or_(cls.__table__.c.Cust_Nums.is_(None), cls.__table__.c.Cust_Nums == ""),
             func.IF(or_(cls.__table__.c.Ship_To_Nums.is_(None), cls.__table__.c.Ship_To_Nums == ""),
                     literal(""),
                     literal("ERROR")))
        ], else_=literal("ERROR"))
# noinspection PyPep8Naming
class cntr_source_01_current(server_utils.mysql_base):
    """Current contract customer-source rows (``price.contract`` schema)."""
    __table__ = Table('cntr_source_01_current', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Cntr_Num',
                             String(length=10),
                             nullable=True,
                             index=True,
                             doc='Contract Number',
                             comment='The key to the contract record.'),
                      Column('Cust_Src',
                             String(length=20),
                             nullable=True,
                             doc='Customer Source',
                             comment='Customer source code field associated with the contract.'),
                      schema='price.contract')
# noinspection PyPep8Naming
class cntr_source_02_archive(server_utils.mysql_base):
    """Archived contract customer-source rows (``price.contract`` schema)."""
    __table__ = Table('cntr_source_02_archive', server_utils.mysql_base.metadata,
                      Column('ID',
                             Integer,
                             primary_key=True,
                             unique=True,
                             index=True,
                             autoincrement=True,
                             doc='ID',
                             comment='N/A'),
                      Column('Date_Time_Stamp',
                             DateTime,
                             nullable=True,
                             doc='Date Time Stamp',
                             comment='N/A'),
                      Column('Cntr_Num',
                             String(length=10),
                             nullable=True,
                             index=True,
                             doc='Contract Number',
                             comment='The key to the contract record.'),
                      Column('Cust_Src',
                             String(length=20),
                             nullable=True,
                             doc='Customer Source',
                             comment='Customer source code field associated with the contract.'),
                      schema='price.contract')
# noinspection PyPep8Naming
class price_matrix_01_current(server_utils.mysql_base):
__table__ = Table('price_matrix_01_current', server_utils.mysql_base.metadata,
Column('ID',
Integer,
primary_key=True,
unique=True,
index=True,
autoincrement=True,
doc='ID',
comment='N/A'),
Column('Date_Time_Stamp',
DateTime,
nullable=True,
doc='Date Time Stamp',
comment='N/A'),
Column('Level_Num',
String(length=5),
nullable=True,
index=True,
doc='Level Number',
comment='The matrix level number. Use the levels button in the Price Matrix for a'
'complete list.'),
Column('Branch_Code',
String(length=6),
nullable=True,
doc='Branch Code',
comment='If blank, this price matrix applies to all orders. Otherwise, this matrix will'
'only be used for the specified branch.'),
Column('All_Cust_Cntr',
String(length=17),
nullable=True,
doc='All Customer Contract',
comment='This flag says every customer is eligible for this contract.'),
Column('Cust_Cat',
String(length=5),
nullable=True,
index=True,
doc='Customer Category Code',
comment='The customer category code that is linked to a price matrix.'),
Column('Cust_Num',
String(length=8),
nullable=True,
index=True,
doc='Customer Number',
comment='The customer number.'),
Column('Ship_To_Code',
String(length=8),
nullable=True,
index=True,
doc='Ship To Code',
comment='The Ship To code number'),
Column('Major_Group',
String(length=5),
nullable=True,
doc='Major Group',
comment='The product Major Group.'),
Column('Prod_Line',
String(length=10),
nullable=True,
doc='Product Line',
comment='The product line code. Use this to display price matrices directly linked to a'
'product line.'),
Column('Price_Group_Code',
String(length=8),
nullable=True,
doc='Price Group Code',
comment='The Price Group code.'),
Column('Prod_Num',
String(length=25),
nullable=True,
index=True,
doc='Product Number',
comment='The product number'),
Column('Qty_Break',
Numeric(precision=19, scale=4),
nullable=True,
doc='Quantity Break',
comment='The quantity break value.'),
Column('Qty_Break_Count',
Integer,
nullable=True,
doc='Quantity Break Count',
comment='The number of quantity breaks for the price matrix.'),
Column('Vend_Num',
String(length=7),
nullable=True,
index=True,
doc='Vendor Number',
comment='The vendor number field.'),
Column('Cntr_Num',
String(length=15),
nullable=True,
doc='Contract Number',
comment='The contract number.'),
Column('Price_Net_Factor',
Numeric(precision=19, scale=4),
nullable=True,
doc='Net Factor Price Amount',
comment='This is the value found in the Amount field for price'),
Column('Price_Pcnt_Dollar',
String(length=1),
nullable=True,
doc='Percent / Dollar Sign',
comment='This field will show a % or a $ sign based on the type of formula used.'),
Column('Price_Plus_Minus',
String(length=1),
nullable=True,
doc='Plus / Minus Sign',
comment='This field will show a + or - sign based on the type of formula used'),
Column('Price_CLN',
String(length=3),
nullable=True,
doc='Price C/L/N',
comment='The C/L/N base field in Price Matrix for determining sales price. This is cost,'
'list, net, or calculated cost'),
Column('Cost_Amount',
Numeric(precision=19, scale=4),
nullable=True,
doc='Cost Amount',
comment='The cost that is displayed in the amount field of the price matrix.'),
Column('Cost_CLN',
String(length=3),
nullable=True,
doc='Cost C/L/N',
comment='The C/L/N base field in Price Matrix for determining cost. This is cost, list,'
'or net'),
Column('Cost_Net_Factor',
Numeric(precision=19, scale=4),
nullable=True,
doc='Cost Net Factor',
comment='Cost net factor'),
Column('Cost_Pcnt_Dollar',
String(length=1),
nullable=True,
doc='Cost Percent / Dollar',
comment='This field will show a % or a $ sign based on the type of cost formula used.'),
Column('Cost_Plus_Minus',
String(length=1),
nullable=True,
doc='Cost Plus / Minus',
comment='Cost Plus/Minus'),
Column('Load_Pcnt',
Numeric(precision=19, scale=4),
| |
import sys
from calendar import month_name
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.model_selection import TimeSeriesSplit, StratifiedKFold, GroupKFold, KFold
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from dateutil.relativedelta import relativedelta
from collections import Counter, defaultdict
from utils import *
def stratified_group_k_fold(X, y, groups, k, seed=2021):
    """Greedily split *groups* into *k* stratified folds.

    Every sample of a group lands in the same fold, while each fold's label
    distribution is kept as close as possible to the global one. Yields
    ``(train_indices, test_indices)`` positional index lists per fold.
    """
    n_labels = np.max(y) + 1

    # Per-group and global label histograms.
    counts_by_group = defaultdict(lambda: np.zeros(n_labels))
    total_counts = Counter()
    for label, grp in zip(y, groups):
        counts_by_group[grp][label] += 1
        total_counts[label] += 1

    fold_counts = defaultdict(lambda: np.zeros(n_labels))
    fold_groups = defaultdict(set)

    def score_assignment(counts, fold):
        # Mean per-label std-dev of fold ratios if *counts* joined *fold*
        # (lower = more balanced); the trial addition is rolled back.
        fold_counts[fold] += counts
        label_stds = [
            np.std([fold_counts[f][label] / total_counts[label] for f in range(k)])
            for label in range(n_labels)
        ]
        fold_counts[fold] -= counts
        return np.mean(label_stds)

    candidates = list(counts_by_group.items())
    random.Random(seed).shuffle(candidates)
    # Assign the most label-skewed groups first, each to its best fold.
    for grp, counts in sorted(candidates, key=lambda item: -np.std(item[1])):
        best_fold, best_score = None, None
        for fold in range(k):
            score = score_assignment(counts, fold)
            if best_score is None or score < best_score:
                best_score, best_fold = score, fold
        fold_counts[best_fold] += counts
        fold_groups[best_fold].add(grp)

    all_groups = set(groups)
    for fold in range(k):
        train_groups = all_groups - fold_groups[fold]
        test_groups = fold_groups[fold]
        yield (
            [idx for idx, grp in enumerate(groups) if grp in train_groups],
            [idx for idx, grp in enumerate(groups) if grp in test_groups],
        )
class StratifiedKFoldWithGroupID():
    """Group-aware stratified K-fold.

    Delegates to ``stratified_group_k_fold``: rows are stratified on
    ``stratified_target_id`` while all rows sharing ``group_id_col`` stay
    in the same fold.
    """
    def __init__(self, group_id_col, stratified_target_id, n_splits, random_state=2021, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.group_id_col = group_id_col
        self.stratified_target_id = stratified_target_id
        self.n_splits = n_splits
        self.random_state = random_state

    def split(self, _df_X, _se_y, _group, *args, **kwargs):
        """Yield (train_idx, test_idx) positional index lists; _group is unused."""
        # Copy so the helper column never mutates the caller's frame
        # (the original assigned into a slice view -> SettingWithCopyWarning).
        df = _df_X[[self.group_id_col]].copy()
        if isinstance(_se_y, pd.DataFrame):
            df["group_target"] = _se_y[self.stratified_target_id]
        else:
            df["group_target"] = _se_y
        # Labels must be contiguous 0..n-1 ints for stratified_group_k_fold.
        df["group_target"] = procLabelEncToSeries(df["group_target"])
        df = df.reset_index()
        for train_idx, test_idx in stratified_group_k_fold(X=df, y=df["group_target"].values, groups=df[self.group_id_col].values, k=self.n_splits, seed=self.random_state):
            print(f"fold train :{df.iloc[train_idx]['group_target'].value_counts()}")
            print(f"fold valid :{df.iloc[test_idx]['group_target'].value_counts()}")
            yield train_idx, test_idx
class ssl1Fold():
    """Single pseudo-fold for semi-supervised training.

    Trains on every row and validates only on rows flagged with
    ``train_flag == 1``.
    """
    def __init__(self):
        # Always exactly one split.
        self.n_splits = 1
    def split(self, _df_X, _df_y, _group, *args, **kwargs):
        """Yield one (train_index, test_index) pair over the reset index."""
        frame = _df_X.reset_index()
        labelled = frame["train_flag"] == 1
        yield frame.index, frame.loc[labelled].index
class DummyKfold():
    """Placeholder CV splitter: yields ``([0], [0])`` for every fold.

    Useful when the training loop requires a splitter interface but no
    real cross-validation is wanted.
    """
    def __init__(self, n_splits, random_state):
        self.n_splits = n_splits
        # Kept for interface compatibility; never used.
        self.random_state = random_state
    def split(self, _df_X, _df_y, _group, *args, **kwargs):
        # Trivial folds: a single dummy index on both sides.
        for _ in range(self.n_splits):
            yield [0], [0]
class SeasonKFold():
    """KFold that walks backwards over a season column.

    Each fold holds out the ``num_seasons`` most recent remaining seasons
    as the test side and trains on all earlier ones; the held-out seasons
    are then dropped before building the next fold.
    """
    def __init__(self, n_splits, ts_column="Season", clipping=False, num_seasons=5,*args, **kwargs):
        super().__init__(*args, **kwargs)
        # Name of the column carrying the season (time) value.
        self.ts_column = ts_column
        # When True, clip every fold to the smallest fold length seen so far.
        self.clipping = clipping
        # How many trailing seasons form the test side of each fold.
        self.num_seasons = num_seasons
        self.n_splits = n_splits
    def split(self, X, *args, **kwargs):
        """Yield (train_iloc_list, test_iloc_list); X must be a DataFrame."""
        assert isinstance(X, pd.DataFrame)
        # Running minima; only meaningful when clipping is enabled.
        shortest_train, shortest_test = sys.maxsize, sys.maxsize
        # Renumber rows 0..n-1 so yielded indices are usable with iloc.
        season_frame = X[self.ts_column].reset_index()
        season_values = sorted(season_frame[self.ts_column].unique())
        for _ in range(self.n_splits):
            train_list = season_values[:-self.num_seasons]
            test_list = season_values[-self.num_seasons:]
            print(f"train season: {train_list}")
            print(f"test season: {test_list}")
            in_train = season_frame[self.ts_column].isin(train_list)
            in_test = season_frame[self.ts_column].isin(test_list)
            train_iloc_index = season_frame.loc[in_train].index
            test_iloc_index = season_frame.loc[in_test].index
            # The next fold works on the shrunken season range.
            season_values = train_list
            if self.clipping:
                shortest_train = min(shortest_train, len(train_iloc_index))
                shortest_test = min(shortest_test, len(test_iloc_index))
            yield list(train_iloc_index[-shortest_train:]), list(test_iloc_index[-shortest_test:])
class TournamentGroupKFold(GroupKFold):
    """GroupKFold restricted to tournament games.

    Rows with ``day_num_col`` >= ``tournament_start_daynum`` (tournament
    games) are group-K-folded; all remaining (regular-season) rows are
    appended to every training fold and never used for validation.
    """
    def __init__(self, group_id_col="Season", day_num_col="DayNum",tournament_start_daynum=133, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.group_id_col = group_id_col
        self.day_num_col = day_num_col
        # DayNum threshold separating regular season from tournament play.
        self.tournament_start_daynum = tournament_start_daynum
    def split(self, _df_X, _df_y, _group, *args, **kwargs):
        """Yield (train, test) positional index lists over ``_df_X``."""
        # Tournament subset: only these rows are ever validated on.
        df_tournament = _df_X.loc[_df_X[self.day_num_col]>=self.tournament_start_daynum]
        df_tournament_y = _df_y.loc[df_tournament.index]
        # Regular-season rows: always added to the training side.
        df_reg = _df_X.loc[~_df_X.index.isin(df_tournament.index)]
        reg_index = _df_X.index.get_indexer(df_reg.index).tolist()
        if _group is None:
            _group = df_tournament[self.group_id_col]
        for train_id_index, test_id_index in super().split(df_tournament, df_tournament_y, _group):
            # print(f"train_id_index:{train_id_index}")
            # print(f"test_id_index:{test_id_index}")
            # print(f"train season: {df_tournament.iloc[train_id_index]['Season'].unique()}")
            # print(f"test season: {df_tournament.iloc[test_id_index]['Season'].unique()}")
            # Map tournament-local positions back to index labels, then to
            # positional indices in the full frame.
            train_set_ID = df_tournament.iloc[train_id_index].index
            test_set_ID = df_tournament.iloc[test_id_index].index
            train_index_list = _df_X.index.get_indexer(train_set_ID).tolist()
            test_index_list = _df_X.index.get_indexer(test_set_ID).tolist()
            yield train_index_list+reg_index, test_index_list
class VirtulTimeStampSplit():
    """Walk-forward hold-out splitter for timestamp-ordered data.

    Assumes rows are already sorted by a (virtual) timestamp and repeatedly
    carves the last ``num_valid`` rows off as the validation set, shrinking
    the training range before each subsequent fold.
    """
    def __init__(self, n_splits, num_valid=2500000):
        # Number of trailing rows (or trailing unique row ids in split_melt)
        # held out for validation per fold.
        self.num_valid = num_valid
        self.n_splits = n_splits
    def split_melt(self, _df_X, _df_y, _group):
        """Variant for "melted" frames where one logical row id spans several
        physical rows: the validation boundary is aligned to row-id borders.
        """
        cpu_stats(f"in split")
        row_id_sequence = _df_X.index.tolist()
        row_id_list = _df_X.index.unique().tolist()
        #print(row_id_list)
        all_train_idx = range(len(_df_X))
        print(f"row_id_sequence : {sys.getsizeof(row_id_sequence)}")
        print(f"row_id_list : {sys.getsizeof(row_id_list)}")
        print(f"all_train_idx : {sys.getsizeof(all_train_idx)}")
        cpu_stats(f"after creating all_train_idx")
        for n in range(self.n_splits):
            # The last num_valid unique ids form the validation block.
            valid_row_id_list = row_id_list[-self.num_valid:]
            first_valid_row_id = valid_row_id_list[0]
            # First physical position of the first validation id.
            valid_id_from = row_id_sequence.index(first_valid_row_id)
            valid = all_train_idx[valid_id_from:]
            cpu_stats(f"mid yield")
            # Shrink the pool for the next (earlier) fold.
            row_id_list = row_id_list[:-self.num_valid]
            all_train_idx = all_train_idx[:valid_id_from]#_df_X.index.get_loc(row_id_list)
            print(f"fold : {n}")
            print(f"train : {len(all_train_idx)}, {all_train_idx}")
            print(f"valid : {len(valid)}, {valid}")
            cpu_stats(f"before yield")
            yield all_train_idx, valid
    def split(self, _df_X, _df_y, _group):
        """Positional walk-forward split: each fold validates on the last
        ``num_valid`` remaining positions and trains on everything before."""
        all_train_idx = range(len(_df_X))
        for n in range(self.n_splits):
            valid = all_train_idx[-self.num_valid:]
            all_train_idx = all_train_idx[:-self.num_valid]
            print(f"fold : {n}")
            print(f"train : {len(all_train_idx)}, {all_train_idx}")
            print(f"valid : {len(valid)}, {valid}")
            yield all_train_idx, valid
def return_debug_index(debug, _train_idx, _val_idx, rate=0.5):
    """Optionally subsample index lists for fast debug runs.

    When *debug* is true, returns random samples of ``rate`` of each list;
    otherwise returns both lists unchanged.
    """
    if not debug:
        return _train_idx, _val_idx
    sampled_train = random.sample(_train_idx, int(len(_train_idx) * rate))
    sampled_val = random.sample(_val_idx, int(len(_val_idx) * rate))
    return sampled_train, sampled_val
class myGroupKFold(GroupKFold):
    """GroupKFold with optional debug subsampling and optional filtering of
    rows to ``u_out == 0`` (only those rows appear in the yielded indices).
    """
    def __init__(self, group_id_col, cut_uout_flag, debug, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.group_id_col = group_id_col
        # When True, drop rows with u_out != 0 from both folds.
        self.cut_uout_flag = cut_uout_flag
        # When True, keep only half of the group ids per fold for speed.
        self.debug = debug
    def split(self, _df_X, _df_y, _group=None, *args, **kwargs):
        """Yield (train, validation) indices over the reset index of ``_df_X``."""
        df_X = _df_X.reset_index()
        #if _group is None:
        group = df_X[self.group_id_col]
        for train_index, val_index in super().split(df_X, _df_y, group):
            if self.debug:
                # Subsample group ids on each side (see return_debug_index),
                # then rebuild row indices from the surviving groups.
                train_gp_id_list = list(df_X.iloc[train_index][self.group_id_col].unique())
                val_gp_id_list = list(df_X.iloc[val_index][self.group_id_col].unique())
                train_gp_id_list, val_gp_id_list = return_debug_index(self.debug, train_gp_id_list, val_gp_id_list, rate=0.5)
                train_index = df_X.loc[df_X[self.group_id_col].isin(train_gp_id_list)].index
                val_index = df_X.loc[df_X[self.group_id_col].isin(val_gp_id_list)].index
                #.set_trace()
            if self.cut_uout_flag:
                # Restrict both folds to rows where u_out == 0.
                df_train = df_X.iloc[train_index]
                new_train_index = df_train.loc[df_train["u_out"]==0].index
                #new_train_index=train_index
                df_val = df_X.iloc[val_index]
                new_val_index = df_val.loc[df_val["u_out"]==0].index
                print(f"train: {df_X.loc[train_index, 'u_out'].value_counts()}")
                print(f"test: {df_X.loc[val_index, 'u_out'].value_counts()}")
                print(f"test u_out 0 : {df_X.loc[new_val_index, 'u_out'].value_counts()}")
                yield new_train_index, new_val_index
            else:
                yield train_index, val_index
class TimeStampNewUserSplit(GroupKFold):
    """GroupKFold variant for timestamp-sorted per-user data.

    Validation mixes "new users" (first ``new_user_head_num`` rows of each
    held-out user) with "old users" (last ``old_user_tail_num`` rows of each
    training user); those old-user tail rows are removed from training.
    """
    def __init__(self, group_id_col, new_user_head_num=10, old_user_tail_num=10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.group_id_col = group_id_col
        self.new_user_head_num = new_user_head_num
        self.old_user_tail_num = old_user_tail_num
    def split(self, _df_X, _df_y, _group, *args, **kwargs): # we assume df has already been sorted by timestamp
        """Yield (train, test) reset-index positions per fold."""
        df = _df_X[[self.group_id_col]]
        df[_df_y.columns] = _df_y
        df = df.reset_index()
        if _group is None:
            _group = df[self.group_id_col]
        fold_idx=1
        for old_id_index, new_id_index in super().split(df[self.group_id_col], df[_df_y.columns], _group):
            # Head rows of held-out ("new") users go to validation.
            test_new_user_index = set(df.iloc[new_id_index].groupby(self.group_id_col).head(self.new_user_head_num).index)
            # Tail rows of training ("old") users also go to validation.
            test_old_user_index = set(df.iloc[old_id_index].groupby(self.group_id_col).tail(self.old_user_tail_num).index)
            #print(f"test_new_user : {len(test_new_user)}, test_old_user : {len(test_old_user)}")
            #print(f"test_new_user : {len(test_new_user_index)}, test_old_user : {len(test_old_user_index)}")
            #print(f"train_old_user_index ; {len(train_old_user_index)}, add : {len(train_old_user_index) + len(test_old_user_index)}")
            #print(f"old_id_index : {len(old_id_index)}, new_id_index : {len(new_id_index)}")
            #print( df.iloc[new_id_index].groupby(self.group_id_col).head(self.new_user_head_num))
            #print(f"{df.iloc[test_old_user_index].groupby(self.group_id_col).count()}")
            cpu_stats(f"TimeStampNewUserSplit")
            fold_idx+=1
            yield list(set(old_id_index) - test_old_user_index), list(test_new_user_index|test_old_user_index)
class myStratifiedKFold(StratifiedKFold):
    """StratifiedKFold that pulls its stratification labels from a named
    column of the target frame, or of the feature frame as a fallback.
    """
    def __init__(self, stratified_col, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.stratified_col = stratified_col
    def split(self, _df_X, _df_y, dummy_group, *args, **kwargs):
        """Yield (train, test) positional indices stratified on
        ``self.stratified_col``; ``dummy_group`` is ignored."""
        if self.stratified_col in _df_y.columns:
            df_y = _df_y[[self.stratified_col]]
        else: #self.stratified_col in _df_X.columns:
            df_y = _df_X[[self.stratified_col]]
        df_y = df_y.reset_index()
        for train_id_index, test_id_index in super().split(_df_X, df_y[self.stratified_col]):
            print(f"fold train :{train_id_index}")
            print(f"{df_y.iloc[train_id_index][self.stratified_col].value_counts()}")
            print(f"fold valid :{test_id_index}")
            print(f"{df_y.iloc[test_id_index][self.stratified_col].value_counts()}")
            # train_label_list = df_y.iloc[train_id_index][self.stratified_col].unique()
            # test_label_list = df_y.iloc[test_id_index][self.stratified_col].unique()
            # only_test_list = list(set(test_label_list) - set(train_label_list))
            # if len(only_test_list)>0:
            # only_test_index = list(df_y.loc[df_y[self.stratified_col].isin(only_test_list)].index)
            # train_id_index = np.array(train_id_index.tolist()+only_test_index)
            # for num in only_test_index:
            # new_test_id_index = test_id_index.tolist()
            # new_test_id_index.remove(num)
            # test_id_index = np.array(new_test_id_index)
            yield train_id_index, test_id_index
class siteStratifiedPathGroupKFold():
    """Splitter that mimics the test set's per-site composition.

    For each site in ``df_test``, whole path groups are assigned to folds so
    that every fold holds roughly the same number of paths (and rows) per
    site as the test set does.
    """
    def __init__(self, df_test, group_id_col, stratified_target_id, n_splits):
        self.group_id_col = group_id_col
        self.stratified_target_id = stratified_target_id
        self.n_splits = n_splits
        # Per-site row count and unique-path count of the test set.
        self.df_test_info = df_test.groupby(self.stratified_target_id)[self.group_id_col].agg(["count", "nunique"])
    def split(self, _df_X, _se_y, _group, *args, **kwargs):
        """Yield (train, test) reset-index position lists over ``_df_X``."""
        df_train = _df_X[[self.group_id_col, self.stratified_target_id]]
        df_train = df_train.reset_index()
        # Sentinel: rows still == n_splits at the end belong to no test fold.
        df_train["fold"]=self.n_splits
        for site_id, row in self.df_test_info.iterrows():
            count = row["count"]
            nunique = row["nunique"]
            path_list = df_train.loc[df_train[self.stratified_target_id]==site_id, self.group_id_col].unique()
            random.shuffle(path_list)
            # Chunk the shuffled paths into candidate sets of test-set size.
            path_set_list= [t for t in zip(*[iter(path_list)]*nunique)]
            diff_dict = {}
            for i, path_set in enumerate(path_set_list):
                #print(f"{i} : {path_set}")
                train_path_count = df_train.loc[df_train[self.group_id_col].isin(path_set), self.group_id_col].count()
                diff_count = abs(train_path_count-count)
                diff_dict[i] = diff_count
            # Keep the n_splits candidate sets whose row counts are closest
            # to the test set's row count for this site.
            sort_i_list = sorted(diff_dict.items(), key=lambda x:x[1])
            #print(sort_i_list)
            for k in range(self.n_splits):
                sort_i = sort_i_list[k][0]
                path_set =path_set_list[sort_i]
                df_train.loc[df_train[self.group_id_col].isin(path_set), "fold"] = k
                ##print(f"{sort_i}, {sort_i_list[k]}")
                #print(f"df_train fold k : {df_train.loc[df_train['fold']==k].shape}")
            #pdb.set_trace()
        for k in range(self.n_splits):
            #df_fold_t = df_train.loc[df_train["fold"]==k, ["site_id", "path"]]
            #print(f"fold {k}:")
            #print(df_fold_t.groupby("site_id")["path"].agg(["count", "nunique"]))
            yield list(df_train.loc[df_train["fold"]!=k].index), list(df_train.loc[df_train["fold"]==k].index)
class myStratifiedKFoldWithGroupID(StratifiedKFold):
    """Group-level StratifiedKFold: each group is represented by the mode of
    its stratification target, the groups are stratified-K-folded, and the
    member rows of each group are yielded together.
    """
    def __init__(self, group_id_col, stratified_target_id, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.group_id_col = group_id_col
        self.stratified_target_id = stratified_target_id
    def split(self, _df_X, _se_y, _group, *args, **kwargs):
        """Yield (train, test) reset-index positions grouped by
        ``group_id_col`` and stratified on ``stratified_target_id``."""
        #_df_X["group_target"] = _se_y
        df = _df_X[[self.group_id_col, self.stratified_target_id]]
        #df["group_target"] = _se_y
        df = df.reset_index()
        # One representative label per group: the modal target value.
        gp = df.groupby(self.group_id_col)[self.stratified_target_id].apply(lambda x:x.mode()[0])
        #print(gp)
        #print(gp.index)
        #del df
        #gc.collect()
        fold_idx=1
        for train_id_index, test_id_index in super().split(gp.index, gp, _group):
            #print(f"fold_idx : {fold_idx}")
            #print(f"train_id_index : {train_id_index}, test_id_index : {test_id_index}")
            #print(f"train_id : {gp.index[train_id_index]}, test_id : {gp.index[test_id_index]}")
            # Expand group-level fold assignments back to member rows.
            train_id_list = list(gp.index[train_id_index])
            test_id_list = list(gp.index[test_id_index])
            print(f"fold train :{df.loc[df[self.group_id_col].isin(train_id_list), self.stratified_target_id].value_counts()}")
            print(f"fold valid :{df.loc[df[self.group_id_col].isin(test_id_list), self.stratified_target_id].value_counts()}")
            #print(f"train_seq_id : {df.loc[df[self.group_id_col].isin(train_id_list)].index}, test_id : {df.loc[df[self.group_id_col].isin(test_id_list)].index}")
            fold_idx+=1
            yield list(df.loc[df[self.group_id_col].isin(train_id_list)].index), list(df.loc[df[self.group_id_col].isin(test_id_list)].index)
# class StratifiedKFoldWithGroupID(StratifiedKFold):
# def __init__(self, group_id_col, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.group_id_col = group_id_col
# def split(self, _df_X, _se_y, _group, *args, **kwargs):
# #_df_X["group_target"] = _se_y
# df = _df_X[[self.group_id_col]]
# df["group_target"] = _se_y
# df["group_target"] = | |
# Source repository: TingtingAlice/MPM-LTL
from __future__ import print_function
import os.path as osp
import time
import numpy as np
import scipy.io as sio
import matplotlib.image as mpimg
import glob
from collections import defaultdict
import shutil
import torch
from utils import may_make_dir
from utils import save_pickle
from utils import load_pickle
new_im_name_tmpl = '{:08d}_{:04d}_{:08d}.jpg'
def parse_im_name(im_name, parse_type='id'):
    """Extract the person id or camera id encoded in an image file name.

    Names follow ``{id:08d}_{cam:04d}_{seq:08d}.jpg``: the first 8
    characters hold the person id and characters 9-12 the camera id.
    """
    assert parse_type in ('id', 'cam')
    start, end = (0, 8) if parse_type == 'id' else (9, 13)
    return int(im_name[start:end])
def get_im_names(im_dir, pattern='*.png', return_np=True, return_path=False):
    """Collect image names (or full paths) under *im_dir* matching *pattern*.

    Returns base names by default, full paths when ``return_path`` is set,
    and wraps the result in a numpy array when ``return_np`` is set.
    """
    paths = glob.glob(osp.join(im_dir, pattern))
    if return_path:
        found = paths
    else:
        found = [osp.basename(p) for p in paths]
    return np.array(found) if return_np else found
def intersection_mask(mask_1, mask_2):
    """Count positions set in both binary masks (their overlap area)."""
    overlap = (mask_1 + mask_2) == 2
    return int(np.count_nonzero(overlap))
def nearest_mask(anchor_mask, sample_, top=1):
    """Rank the images in *sample_* by mask overlap with *anchor_mask*.

    Args:
        anchor_mask: alpha-channel mask of the anchor image.
        sample_: list of image paths; each alpha channel is read as a mask.
        top: how many of the best-overlapping paths to return.
    Return:
        sample: list of the ``top`` paths with the largest intersection.
    """
    overlaps = []
    for path in sample_:
        candidate_mask = mpimg.imread(path)[:, :, 3]
        overlaps.append(intersection_mask(anchor_mask, candidate_mask))
    # argsort ascending, then reverse for largest-first order
    order = np.argsort(np.array(overlaps))[::-1]
    return [sample_[order[j]] for j in range(top)]
def anchor_positive_negative(im_paths, parse_im_name, save_dir):
    """Build (anchor, positive, negative) triplets and pickle them.

    1. compute the intersection set of masks
    2. compute the distance of features
    3. calculate the difference between d_n and d_p

    Args:
        im_paths: paths of RGBA images; the alpha channel is used as mask.
        parse_im_name: callable extracting 'id'/'cam' from an image name.
        save_dir: directory for anchor_positive_negative_split.pkl.
    """
    # Pre-extracted features keyed by (renamed) image name.
    # NOTE(review): the .mat path is hard-coded to a cluster location.
    feat_path='/GPUFS/nsccgz_ywang_1/alice/dataset/pcb/trans/tri/market_30_retain_feat.mat'
    feat_mat=sio.loadmat(feat_path)
    im_names=feat_mat['im_names']
    # h_list=feat_mat['h_list']
    feats=feat_mat['feat']
    im_names2feats=dict(zip(im_names,feats)) # use the new im_names as keys
    name2new = names2newnames(im_paths, parse_im_name, new_im_name_tmpl) # change the ori_name to new_name
    anchor_list = []
    positive_list = []
    negative_list = []
    since = time.time()
    localtime = time.asctime( time.localtime(time.time()) )
    print('--- Current Time: ',localtime)
    print('--- Dividing ...')
    for i in range(len(im_paths)):
        anchor_path = im_paths[i]
        anchor = osp.basename(anchor_path)
        new_an = name2new[anchor] # change anchor to new name
        feat_an = im_names2feats[new_an] # the feature of anchor
        aid = parse_im_name(anchor,'id')
        acam = parse_im_name(anchor,'cam')
        an_mask = mpimg.imread(anchor_path)[:,:,3]
        # the same id but in the different cam:
        positive_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') == aid) & (parse_im_name(osp.basename(p), 'cam') != acam) ]
        # the different id but in the same cam:
        negative_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') != aid) & (parse_im_name(osp.basename(p), 'cam') == acam) ]
        # Positive: the single candidate with the best mask overlap.
        positive_path = nearest_mask(an_mask, positive_)
        positive_path = positive_path[0]
        positive = osp.basename(positive_path)
        new_po = name2new[positive] # change positive to new name
        feat_po = im_names2feats[new_po] # the feature of positive
        # Negative: top-20 mask overlaps, then the 5 hardest by feature distance.
        negative_path = nearest_mask(an_mask, negative_, top=20)
        nega_tem_list = []
        feat_nega_list = []
        for j in range(20):
            negative = osp.basename(negative_path[j])
            new_ne = name2new[negative] # change negative to new name
            nega_tem_list.append(new_ne)
            feat_ne = im_names2feats[new_ne] # the feature of positive
            feat_nega_list.append(feat_ne)
        # names2newnames / hard_sample are defined elsewhere in this module
        # (not visible here); hard_sample presumably returns the indices of
        # the 5 hardest negatives -- verify against its definition.
        hard_index = hard_sample(feat_an, feat_po, feat_nega_list, small=5)
        for k in range(5):
            anchor_list.append(new_an)
            positive_list.append(new_po)
            negative_list.append(nega_tem_list[hard_index[k]])
    time_elapsed = time.time() - since
    print('--- Dividing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    apn=dict(anchor=anchor_list,positive=positive_list,negative=negative_list)
    anchor_positive_negative_file = osp.join(save_dir, 'anchor_positive_negative_split.pkl')
    save_pickle(apn, anchor_positive_negative_file)
def anchor_positive_negative_2(im_paths, parse_im_name, save_dir):
    """Build (anchor, positive, negative) triplets and pickle them to save_dir.

    Pipeline per anchor: rank same-id/other-cam candidates by feature
    distance, keep the top five, pick the final positive by mask similarity;
    mine the 20 hardest negatives by feature distance, then keep the five
    whose masks are closest to the anchor's.

    Args:
        im_paths: list of image file paths. Images are read with
            ``mpimg.imread`` and channel 3 is used as the mask
            (presumably an alpha/mask plane -- TODO confirm data format).
        parse_im_name: callable(name, 'id'|'cam') extracting person id /
            camera id from a file name.
        save_dir: directory receiving 'anchor_positive_negative_split_2.pkl'.
    """
    # Pre-computed feature matrix; its im_names are the *new* names, so they
    # key im_names2feats directly.
    feat_path='/GPUFS/nsccgz_ywang_1/alice/dataset/pcb/trans/tri/market_30_retain_feat.mat'
    feat_mat=sio.loadmat(feat_path)
    im_names=feat_mat['im_names']
    # h_list=feat_mat['h_list']
    feats=feat_mat['feat']
    im_names2feats=dict(zip(im_names,feats))  # new name -> feature vector
    old2new = names2newnames(im_paths, parse_im_name, new_im_name_tmpl)  # original name -> new name
    anchor_list = []
    positive_list = []
    negative_list = []
    since = time.time()
    localtime = time.asctime( time.localtime(time.time()) )
    print('--- Current Time: ',localtime)
    print('--- Dividing ...')
    for i in range(len(im_paths)):
        anchor_path = im_paths[i]
        anchor = osp.basename(anchor_path)
        new_an = old2new[anchor]  # anchor under its new name
        feat_an = im_names2feats[new_an]  # feature of the anchor
        aid = parse_im_name(anchor,'id')
        acam = parse_im_name(anchor,'cam')
        # channel 3 of the image is used as the anchor's mask
        an_mask = mpimg.imread(anchor_path)[:,:,3]
        # positives: same person id but a different camera
        positive_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') == aid) & (parse_im_name(osp.basename(p), 'cam') != acam) ]
        # negatives: different person id but the same camera
        negative_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') != aid) & (parse_im_name(osp.basename(p), 'cam') == acam) ]
        ### Positive
        # posi_tem_list = []
        feat_posi_list = []
        for p in range(len(positive_)):
            positive = osp.basename(positive_[p])
            new_po_t = old2new[positive]  # positive under its new name
            # posi_tem_list.append(new_po_t)
            feat_po_t = im_names2feats[new_po_t]  # feature of this positive
            feat_posi_list.append(feat_po_t)
        dist_ap = dist2(feat_an, feat_posi_list)  # anchor-to-positive feature distances
        index = np.argsort(dist_ap)
        index_ap = index[0:5]
        posi_list = []
        # NOTE(review): np.argsort sorts ascending, so these are the five
        # *closest* positives in feature space (the old comment claimed
        # "farthest"; the reversed variant anchor_positive_negative_2_2 is
        # the one that takes the farthest five).
        for q in range(len(index_ap)):
            posi_list.append(positive_[index_ap[q]])
        positive_path = nearest_mask(an_mask, posi_list)  # final pick among the 5 by mask similarity
        positive_path = positive_path[0]
        positive = osp.basename(positive_path)
        new_po = old2new[positive]  # chosen positive under its new name
        feat_po = im_names2feats[new_po]  # feature of the chosen positive
        ### Negative
        # nega_tem_list = []
        feat_nega_list = []
        for j in range(len(negative_)):
            negative = osp.basename(negative_[j])
            new_ne_t = old2new[negative]  # negative under its new name
            # nega_tem_list.append(new_ne_t)
            feat_ne_t = im_names2feats[new_ne_t]  # feature of this negative
            feat_nega_list.append(feat_ne_t)
        ### Hard Sample
        # 20 hardest negatives by feature distance (relative to the positive)
        hard_index = hard_sample(feat_an, feat_po, feat_nega_list, small=20)
        # print(len(hard_index))
        negative_tem_path = []
        for k in range(20):
            negative_tem_path.append(negative_[hard_index[k]])
        # of those 20, keep the 5 whose masks are closest to the anchor's
        negative_path = nearest_mask(an_mask, negative_tem_path, top=5)
        for k in range(5):
            anchor_list.append(new_an)
            positive_list.append(new_po)
            negative = osp.basename(negative_path[k])
            new_ne = old2new[negative]  # negative under its new name
            negative_list.append(new_ne)
    time_elapsed = time.time() - since
    print('--- Dividing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    apn=dict(anchor=anchor_list,positive=positive_list,negative=negative_list)
    anchor_positive_negative_file = osp.join(save_dir, 'anchor_positive_negative_split_2.pkl')
    save_pickle(apn, anchor_positive_negative_file)
def anchor_positive_negative_2_2(im_paths, parse_im_name, save_dir):
    """Build (anchor, positive, negative) triplets and pickle them to save_dir.

    Same pipeline as anchor_positive_negative_2, except the argsort result is
    reversed, so the positive candidates are the five *farthest* in feature
    space (hardest positives). Output file:
    'anchor_positive_negative_split_2_2.pkl'.

    Args:
        im_paths: list of image file paths. Channel 3 of each image is used
            as a mask (presumably an alpha/mask plane -- TODO confirm).
        parse_im_name: callable(name, 'id'|'cam') extracting person id /
            camera id from a file name.
        save_dir: directory receiving the output pickle.
    """
    # Pre-computed feature matrix; its im_names are the *new* names.
    feat_path='/GPUFS/nsccgz_ywang_1/alice/dataset/pcb/trans/tri/market_30_retain_feat.mat'
    feat_mat=sio.loadmat(feat_path)
    im_names=feat_mat['im_names']
    # h_list=feat_mat['h_list']
    feats=feat_mat['feat']
    im_names2feats=dict(zip(im_names,feats))  # new name -> feature vector
    old2new = names2newnames(im_paths, parse_im_name, new_im_name_tmpl)  # original name -> new name
    anchor_list = []
    positive_list = []
    negative_list = []
    since = time.time()
    localtime = time.asctime( time.localtime(time.time()) )
    print('--- Current Time: ',localtime)
    print('--- Dividing ...')
    for i in range(len(im_paths)):
        anchor_path = im_paths[i]
        anchor = osp.basename(anchor_path)
        new_an = old2new[anchor]  # anchor under its new name
        feat_an = im_names2feats[new_an]  # feature of the anchor
        aid = parse_im_name(anchor,'id')
        acam = parse_im_name(anchor,'cam')
        # channel 3 of the image is used as the anchor's mask
        an_mask = mpimg.imread(anchor_path)[:,:,3]
        # positives: same person id but a different camera
        positive_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') == aid) & (parse_im_name(osp.basename(p), 'cam') != acam) ]
        # negatives: different person id but the same camera
        negative_ = [p for p in im_paths
                     if (parse_im_name(osp.basename(p), 'id') != aid) & (parse_im_name(osp.basename(p), 'cam') == acam) ]
        ### Positive
        # posi_tem_list = []
        feat_posi_list = []
        for p in range(len(positive_)):
            positive = osp.basename(positive_[p])
            new_po_t = old2new[positive]  # positive under its new name
            # posi_tem_list.append(new_po_t)
            feat_po_t = im_names2feats[new_po_t]  # feature of this positive
            feat_posi_list.append(feat_po_t)
        dist_ap = dist2(feat_an, feat_posi_list)  # anchor-to-positive feature distances
        index = np.argsort(dist_ap)
        index = index[::-1]  # descending order: farthest (hardest) positives first
        index_ap = index[0:5]
        posi_list = []
        # the five candidates with the *largest* feature distance
        for q in range(len(index_ap)):
            posi_list.append(positive_[index_ap[q]])
        positive_path = nearest_mask(an_mask, posi_list)  # final pick among the 5 by mask similarity
        positive_path = positive_path[0]
        positive = osp.basename(positive_path)
        new_po = old2new[positive]  # chosen positive under its new name
        feat_po = im_names2feats[new_po]  # feature of the chosen positive
        ### Negative
        # nega_tem_list = []
        feat_nega_list = []
        for j in range(len(negative_)):
            negative = osp.basename(negative_[j])
            new_ne_t = old2new[negative]  # negative under its new name
            # nega_tem_list.append(new_ne_t)
            feat_ne_t = im_names2feats[new_ne_t]  # feature of this negative
            feat_nega_list.append(feat_ne_t)
        ### Hard Sample
        # 20 hardest negatives by feature distance (relative to the positive)
        hard_index = hard_sample(feat_an, feat_po, feat_nega_list, small=20)
        # print(len(hard_index))
        negative_tem_path = []
        for k in range(20):
            negative_tem_path.append(negative_[hard_index[k]])
        # of those 20, keep the 5 whose masks are closest to the anchor's
        negative_path = nearest_mask(an_mask, negative_tem_path, top=5)
        for k in range(5):
            anchor_list.append(new_an)
            positive_list.append(new_po)
            negative = osp.basename(negative_path[k])
            new_ne = old2new[negative]  # negative under its new name
            negative_list.append(new_ne)
    time_elapsed = time.time() - since
    print('--- Dividing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    apn=dict(anchor=anchor_list,positive=positive_list,negative=negative_list)
    anchor_positive_negative_file = osp.join(save_dir, 'anchor_positive_negative_split_2_2.pkl')
    save_pickle(apn, anchor_positive_negative_file)
def | |
upper += via_ext
# draw ports
lower_tracks, upper_tracks = port_tracks[cur_layer]
lower_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=port_tr_w)
for tr_idx in lower_tracks]
upper_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=port_tr_w)
for tr_idx in upper_tracks]
# assign port wires to positive/negative terminals
num_ports = len(lower_warrs)
if port_plow:
if num_ports == 1:
plist = lower_warrs
nlist = upper_warrs
else:
plist = [lower_warrs[0], upper_warrs[0]]
nlist = [lower_warrs[1], upper_warrs[1]]
else:
if num_ports == 1:
plist = upper_warrs
nlist = lower_warrs
else:
plist = [lower_warrs[1], upper_warrs[1]]
nlist = [lower_warrs[0], upper_warrs[0]]
# save ports
port_dict[cur_layer] = plist, nlist
# compute cap wires BBoxArray
cap_bndl, cap_bndh = cap_bounds[cur_layer]
cap_tot_space = cap_bndh - cap_bndl
cap_pitch = cap_w + cap_sp
num_cap_wires = cap_tot_space // cap_pitch
cap_bndl += (cap_tot_space - (num_cap_wires * cap_pitch - cap_sp)) // 2
cur_dir = grid.get_direction(cur_layer)
cap_box0 = BBox(cur_dir, lower, upper, cap_bndl, cap_bndl + cap_w)
lay_purp_list = tech_info.get_lay_purp_list(cur_layer)
num_lay_purp = len(lay_purp_list)
assert num_lay_purp <= 2, 'This method now only works for 1 or 2 colors.'
num0 = (num_cap_wires + 1) // 2
num1 = num_cap_wires - num0
barr_pitch = cap_pitch * 2
cap_box1 = cap_box0.get_move_by_orient(cur_dir, dt=0, dp=cap_pitch)
barr0 = BBoxArray(cap_box0, cur_dir, np=num0, spp=barr_pitch)
barr1 = BBoxArray(cap_box1, cur_dir, np=num1, spp=barr_pitch)
if port_plow:
capp_barr = barr1
capn_barr = barr0
capp_lp = lay_purp_list[-1]
capn_lp = lay_purp_list[0]
else:
capp_barr = barr0
capn_barr = barr1
capp_lp = lay_purp_list[0]
capn_lp = lay_purp_list[-1]
# draw cap wires
self.add_bbox_array(capp_lp, capp_barr)
self.add_bbox_array(capn_lp, capn_barr)
# save caps
cap_barr_tuple = (capp_lp, capn_lp, capp_barr, capn_barr)
cap_wire_dict[cur_layer] = cap_barr_tuple
if cap_wires_list is not None:
cap_wires_list.append(cap_barr_tuple)
# connect port/cap wires to bottom port/cap
if cur_layer != bot_layer:
# connect ports to layer below
bplist, bnlist = port_dict[cur_layer - 1]
bcapp_lp, bcapn_lp, bcapp, bcapn = cap_wire_dict[cur_layer - 1]
self._add_mom_cap_connect_ports(bplist, plist)
self._add_mom_cap_connect_ports(bnlist, nlist)
self._add_mom_cap_connect_cap_to_port(Direction.UPPER, capp_lp, capp_barr, bplist)
self._add_mom_cap_connect_cap_to_port(Direction.UPPER, capn_lp, capn_barr, bnlist)
self._add_mom_cap_connect_cap_to_port(Direction.LOWER, bcapp_lp, bcapp, plist)
self._add_mom_cap_connect_cap_to_port(Direction.LOWER, bcapn_lp, bcapn, nlist)
return port_dict
def _add_mom_cap_connect_cap_to_port(self, cap_dir: Direction, cap_lp: Tuple[str, str],
                                     barr: BBoxArray, ports: List[WireArray]) -> None:
    """Tie MOM-cap finger geometry to its port wire(s).

    With a single port the entire bbox array connects to it; with several
    ports the array is partitioned along the port routing direction and
    each partition connects to its own port wire.
    """
    total = len(ports)
    if total == 1:
        self.connect_bbox_to_tracks(cap_dir, cap_lp, barr, ports[0].track_id)
        return
    split_dir = self.grid.get_direction(ports[0].layer_id)
    for pos, port_warr in enumerate(ports):
        sub_arr = barr.get_sub_array(split_dir, total, pos)
        self.connect_bbox_to_tracks(cap_dir, cap_lp, sub_arr, port_warr.track_id)
def _add_mom_cap_connect_ports(self, bot_ports: List[WireArray], top_ports: List[WireArray]
                               ) -> None:
    """Drop an extended via at every (bottom, top) port wire crossing."""
    for bot_warr in bot_ports:
        for top_warr in top_ports:
            self.add_via_on_grid(bot_warr.track_id, top_warr.track_id, extend=True)
def reserve_tracks(self, layer_id: int, track_idx: TrackType, *,
                   width: int = 1, num: int = 1, pitch: int = 0) -> None:
    """Mark routing tracks as reserved so power fill skips them.

    The template size must already be set before this is called.

    Parameters
    ----------
    layer_id : int
        the wire layer ID.
    track_idx : TrackType
        the smallest wire track index.
    width : int
        the wire width in number of tracks.
    num : int
        number of wires.
    pitch : TrackType
        the wire pitch.
    """
    # TODO: fix this method
    raise ValueError('Not implemented yet.')
def get_available_tracks(self, layer_id: int, tid_lo: TrackType, tid_hi: TrackType,
                         lower: int, upper: int, width: int = 1, sep: HalfInt = HalfInt(1),
                         include_last: bool = False, sep_margin: Optional[HalfInt] = None
                         ) -> List[HalfInt]:
    """Returns a list of available tracks between the given bounds.

    A track is "available" when a wire of the given width placed on it,
    spanning [lower, upper], would not intersect any existing geometry
    within the separation margin.

    Parameters
    ----------
    layer_id : int
        the layer ID.
    tid_lo : TrackType
        the lower track index, inclusive.
    tid_hi : TrackType
        the upper track index, exclusive by default.
    lower : int
        the lower wire coordinate.
    upper: int
        the upper wire coordinate.
    width : int
        the track width.
    sep : HalfInt
        the track separation
    include_last : bool
        True to make "upper" inclusive.
    sep_margin : Optional[HalfInt]
        the margin between available tracks and surrounding wires, in number of tracks.

    Returns
    -------
    tidx_list : List[HalfInt]
        list of available tracks.
    """
    grid = self.grid
    orient = grid.get_direction(layer_id)
    tr_info = grid.get_track_info(layer_id)
    if sep_margin is None:
        sep_margin = grid.get_sep_tracks(layer_id, width, 1, same_color=False)
    # half the physical extent of a `width`-track wide wire
    bl, bu = grid.get_wire_bounds_htr(layer_id, 0, width)
    tr_w2 = (bu - bl) // 2
    # clearance (layout units) needed between a candidate wire edge and
    # surrounding geometry
    margin = tr_info.pitch * sep_margin - (tr_info.width // 2) - tr_w2
    # apply the clearance only perpendicular to the routing direction
    sp_list = [0, 0]
    sp_list[orient.value ^ 1] = margin
    spx, spy = sp_list
    # work in half-track (doubled) units from here on
    htr0 = HalfInt.convert(tid_lo).dbl_value
    htr1 = HalfInt.convert(tid_hi).dbl_value
    if include_last:
        htr1 += 1
    htr_sep = HalfInt.convert(sep).dbl_value
    ans = []
    cur_htr = htr0
    while cur_htr < htr1:
        mid = grid.htr_to_coord(layer_id, cur_htr)
        box = BBox(orient, lower, upper, mid - tr_w2, mid + tr_w2)
        if not self._layout.get_intersect(layer_id, box, spx, spy, False):
            # track is free: record it and jump ahead by the required separation
            ans.append(HalfInt(cur_htr))
            cur_htr += htr_sep
        else:
            # track is blocked: slide forward half a track and retry
            cur_htr += 1
    return ans
def connect_wires(self, wire_arr_list: Union[WireArray, List[WireArray]], *,
                  lower: Optional[int] = None,
                  upper: Optional[int] = None,
                  debug: bool = False,
                  ) -> List[WireArray]:
    """Connect all given WireArrays together.

    All WireArrays must be on the same layer. Wires on the same track are
    merged into a single extent; the merged wires are then grouped into as
    few evenly-pitched WireArrays as possible before being drawn.

    Parameters
    ----------
    wire_arr_list : Union[WireArray, List[WireArray]]
        WireArrays to connect together.
    lower : Optional[CoordType]
        if given, extend connection wires to this lower coordinate.
    upper : Optional[CoordType]
        if given, extend connection wires to this upper coordinate.
    debug : bool
        True to print debug messages.

    Returns
    -------
    conn_list : List[WireArray]
        list of connection wires created.
    """
    grid = self._grid
    # COORD_MAX/COORD_MIN sentinels make min()/max() below pick the wires'
    # own extents when no explicit bound is given
    if lower is None:
        lower = COORD_MAX
    if upper is None:
        upper = COORD_MIN
    # Pass 1: record all wire ranges, keyed by physical track interval.
    # intv_set maps wire-bounds interval -> (set of coord ranges, track idx, width)
    layer_id = None
    intv_set = IntervalSet()
    for wire_arr in WireArray.wire_grp_iter(wire_arr_list):
        # NOTE: no need to copy with new grid, this TrackID is not used to create WireArrays
        tid = wire_arr.track_id
        lay_id = tid.layer_id
        tr_w = tid.width
        if layer_id is None:
            layer_id = lay_id
        elif lay_id != layer_id:
            raise ValueError('WireArray layer ID != {}'.format(layer_id))
        cur_range = wire_arr.lower, wire_arr.upper
        for tidx in tid:
            intv = grid.get_wire_bounds(lay_id, tidx, width=tr_w)
            intv_rang_item = intv_set.get_first_overlap_item(intv)
            if intv_rang_item is None:
                # first wire seen on this track
                range_set = IntervalSet()
                range_set.add(cur_range)
                intv_set.add(intv, val=(range_set, tidx, tr_w))
            elif intv_rang_item[0] == intv:
                # same track/width: merge the coordinate range in
                tmp_rang_set: IntervalSet = intv_rang_item[1][0]
                tmp_rang_set.add(cur_range, merge=True, abut=True)
            else:
                # partial overlap means mismatched widths/tracks: reject
                raise ValueError(f'wire on lay={lay_id}, track={tidx} overlap existing wires. '
                                 f'wire interval={intv}, overlapped wire '
                                 f'interval={intv_rang_item[0]}')
    # Pass 2: draw wires, group into arrays. Consecutive tracks that share
    # the same extent, width, and pitch are emitted as one WireArray.
    new_warr_list = []
    base_start = None  # type: Optional[int]
    base_end = None  # type: Optional[int]
    base_tidx = None  # type: Optional[HalfInt]
    base_width = None  # type: Optional[int]
    count = 0
    pitch = 0
    last_tidx = 0
    for set_item in intv_set.items():
        intv = set_item[0]
        range_set: IntervalSet = set_item[1][0]
        cur_tidx: HalfInt = set_item[1][1]
        cur_tr_w: int = set_item[1][2]
        # extend the merged extent to the requested bounds (sentinels are no-ops)
        cur_start = min(lower, range_set.start)
        cur_end = max(upper, range_set.stop)
        if debug:
            print('wires intv: %s, range: (%d, %d)' % (intv, cur_start, cur_end))
        if count == 0:
            # start a new group
            base_tidx = cur_tidx
            base_start = cur_start
            base_end = cur_end
            base_width = cur_tr_w
            count = 1
            pitch = 0
        else:
            assert base_tidx is not None, "count == 0 should have set base_intv"
            assert base_width is not None, "count == 0 should have set base_width"
            assert base_start is not None, "count == 0 should have set base_start"
            assert base_end is not None, "count == 0 should have set base_end"
            if cur_start == base_start and cur_end == base_end and base_width == cur_tr_w:
                # length and width matches
                cur_pitch = cur_tidx - last_tidx
                if count == 1:
                    # second wire, set half pitch
                    pitch = cur_pitch
                    count += 1
                elif pitch == cur_pitch:
                    # pitch matches
                    count += 1
                else:
                    # pitch does not match, add current wires and start anew
                    track_id = TrackID(layer_id, base_tidx, width=base_width,
                                       num=count, pitch=pitch, grid=grid)
                    warr = WireArray(track_id, base_start, base_end)
                    new_warr_list.append(warr)
                    self._layout.add_warr(track_id, base_start, base_end)
                    base_tidx = cur_tidx
                    count = 1
                    pitch = 0
            else:
                # length/width does not match, add cumulated wires and start anew
                track_id = TrackID(layer_id, base_tidx, width=base_width,
                                   num=count, pitch=pitch, grid=grid)
                warr = WireArray(track_id, base_start, base_end)
                new_warr_list.append(warr)
                self._layout.add_warr(track_id, base_start, base_end)
                base_start = cur_start
                base_end = cur_end
                base_tidx = cur_tidx
                base_width = cur_tr_w
                count = 1
                pitch = 0
        # update last lower coordinate
        last_tidx = cur_tidx
    if base_tidx is None:
        # no wires given at all
        return []
    assert base_tidx is not None, "count == 0 should have set base_intv"
    assert base_start is not None, "count == 0 should have set base_start"
    assert base_end is not None, "count == 0 should have set base_end"
    # add last wires
    track_id = TrackID(layer_id, base_tidx, base_width, num=count, pitch=pitch, grid=grid)
    warr = WireArray(track_id, base_start, base_end)
    self._layout.add_warr(track_id, base_start, base_end)
    new_warr_list.append(warr)
    self._use_color = True
    return new_warr_list
def connect_bbox_to_tracks(self, layer_dir: Direction, lay_purp: | |
# For image output to bmp file
import numpy as np
import imageio
# For image operation
from library.image_tool_box import *
# For math/statistic operation
from library.math_tool_box import StatMaker
import math
### 1. Download and unpack the 4 data files from the MNIST database.
# train-images-idx3-ubyte.gz: training set images (9912422 bytes)
# train-labels-idx1-ubyte.gz: training set labels (28881 bytes)
# t10k-images-idx3-ubyte.gz: test set images (1648877 bytes)
# t10k-labels-idx1-ubyte.gz: test set labels (4542 bytes)
# MNIST database
# http://yann.lecun.com/exdb/mnist/
# Download already completed; files live in sub-directory "./data_of_mAiLab003"
file_name_of_MNIST_image = 'train-images.idx3-ubyte'
file_name_of_MNIST_label = 'train-labels.idx1-ubyte'
data_directory_path = 'data_of_mAiLab003/'
path_of_MNIST_image = data_directory_path + file_name_of_MNIST_image
path_of_MNIST_label = data_directory_path + file_name_of_MNIST_label
with open(path_of_MNIST_image, 'rb') as file_handle:
    # Read header of MNIST image file (helper returns -1 on EOF/exception,
    # otherwise the (height, width) tuple -- per the unpacking below)
    header_return = get_MNIST_image_header(file_handle)
    if -1 == header_return:
        # Handle End-of-File, or exception
        # NOTE(review): error silently ignored -- the prints below would then
        # fail on an undefined image_container; consider aborting here.
        pass
    else:
        (img_height, img_width) = header_return
        image_container = []
        for index in range(10):
            # helper returns -2 on error (per the check below)
            image_return = read_one_MNIST_image(file_handle, img_height, img_width)
            if -2 == image_return:
                # Handle exception
                print("Error occurs in index {:0>2d}".format( index ) )
                break
            else:
                image_matrix = image_return
                # Push image_matrix into container
                image_container.append(image_matrix)
        average_image_of_first_ten = gen_average_image( image_container )
### 2. Output first image from test file, "train-images.idx3-ubyte", with image size 28 x 28.
# print first image
print("First image array:")
print_image_array( image_container[0] )
### 3. Output the average image (with rounding down to nearest integer) of the first ten from test file
# , "train-images.idx3-ubyte", with image size 28 x 28.
# print average image of first ten
print("Average image array of first ten:")
print_image_array( average_image_of_first_ten )
### 4. Output the average label value (rounded down to hundredths) of the
#    first ten labels from "train-labels.idx1-ubyte".
with open(path_of_MNIST_label, 'rb') as file_handle:
    # Read header of MNIST label file (helper returns -1 on EOF/exception)
    header_return = get_MNIST_label_header(file_handle)
    if -1 == header_return:
        # Handle End-of-File, or exception
        pass
    else:
        number_of_items = header_return
        # Read first 10 labels, then save them into label_series
        label_series = list( file_handle.read(10) )
        label_stat = StatMaker( label_series )
        avg = label_stat.get_avg()
        # BUG FIX: rounding down to hundredths must floor the *scaled* value
        # and then divide. The old parenthesization
        # math.floor((avg * 100) / 100) simplifies to math.floor(avg) and
        # floored the raw average to an integer instead.
        avg = math.floor(avg * 100) / 100
        print("The average value of first ten labels in '{:<20}' is {:+02.2f}\n".format( str(file_name_of_MNIST_label), avg ) )
### 5. Extend and output first image to 32x32 from test file, with zero padding on boundary area.
new_side_length = 32
original_side_length = img_width
padding_size = (new_side_length - original_side_length)//2
print("First image array extneds to 32x32 with zero padding over boundary:")
print_image_array_with_padding( image_container[0], padding_size )
### 6. Output and save first image as BitMap(.bmp) file.
print("First image is saved into 'first_image.bmp'.")
# Convert python 2D array (list of lists) to numpy array of dtype uint8
# (unsigned 8-bit integer, 0..255) so imageio can write it as a bitmap.
first_image = np.array( object=image_container[0], dtype=np.uint8 )
# Save it from numpy array to bmp file
imageio.imwrite('first_image.bmp', first_image)
'''
Example output:
### 1. Downlaod and unpack those 4 test data from MNIST database.
# This is is completed, and they're saved in sub-directory "./data_of_mAiLab003"
### 2. Output first image from test file, "train-images.idx3-ubyte", with image size 28 x 28.
First image array:
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 03 12 12 12 7E 88 AF 1A A6 FF F7 7F 00 00 00 00
00 00 00 00 00 00 00 00 1E 24 5E 9A AA FD FD FD FD FD E1 AC FD F2 C3 40 00 00 00 00
00 00 00 00 00 00 00 31 EE FD FD FD FD FD FD FD FD FB 5D 52 52 38 27 00 00 00 00 00
00 00 00 00 00 00 00 12 DB FD FD FD FD FD C6 B6 F7 F1 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 50 9C 6B FD FD CD 0B 00 2B 9A 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 0E 01 9A FD 5A 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 8B FD BE 02 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 0B BE FD 46 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 23 F1 E1 A0 6C 01 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 51 F0 FD FD 77 19 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 2D BA FD FD 96 1B 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10 5D FC FD BB 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 F9 FD F9 40 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 2E 82 B7 FD FD CF 02 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 27 94 E5 FD FD FD FA B6 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 18 72 DD FD FD FD FD C9 4E 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 17 42 D5 FD FD FD FD C6 51 02 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 12 AB DB FD FD FD FD C3 50 09 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 37 AC E2 FD FD FD FD F4 85 0B 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 88 FD FD FD D4 87 84 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
### 3. Output the average image (with rounding down to nearest integer) of the first ten from test file
# , "train-images.idx3-ubyte", with image size 28 x 28.
Average image array of first ten:
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
that are selected to be exposed
Returns:
the prediction of the model in JSON format
"""
fileio = open(filename, 'rb')
if is_video is True:
input = Video(file_obj=fileio)
else:
input = Image(file_obj=fileio)
output_config=ModelOutputConfig(language=lang,
min_value=min_value,
max_concepts=max_concepts,
select_concepts=select_concepts)
res = self.predict([input], output_config)
return res
def predict_by_bytes(self, raw_bytes, lang=None, is_video=False,
                     min_value=None, max_concepts=None, select_concepts=None):
    """ predict a model with image raw bytes

    Args:
      raw_bytes: raw bytes of an image
      lang: language to predict, if the translation is available
      is_video: whether this is a video
      min_value: threshold to cut the predictions, 0-1.0
      max_concepts: max concepts to keep in the predictions, 0-200
      select_concepts: a list of concepts that are selected to be exposed

    Returns:
      the prediction of the model in JSON format
    """
    # base64-encode once, then wrap in the right media type
    encoded = base64_lib.b64encode(raw_bytes)
    media = Video(base64=encoded) if is_video is True else Image(base64=encoded)
    config = ModelOutputConfig(language=lang,
                               min_value=min_value,
                               max_concepts=max_concepts,
                               select_concepts=select_concepts)
    return self.predict([media], config)
def predict_by_base64(self, base64_bytes, lang=None, is_video=False,
                      min_value=None, max_concepts=None, select_concepts=None):
    """ predict a model with base64 encoded image bytes

    Args:
      base64_bytes: base64 encoded image bytes
      lang: language to predict, if the translation is available
      is_video: whether this is a video
      min_value: threshold to cut the predictions, 0-1.0
      max_concepts: max concepts to keep in the predictions, 0-200
      select_concepts: a list of concepts that are selected to be exposed

    Returns:
      the prediction of the model in JSON format
    """
    if is_video is True:
        input = Video(base64=base64_bytes)
    else:
        input = Image(base64=base64_bytes)
    model_output_config = ModelOutputConfig(language=lang,
                                            min_value=min_value,
                                            max_concepts=max_concepts,
                                            select_concepts=select_concepts)
    # BUG FIX: the config built above was previously passed as the undefined
    # name `model_output_info`, which raised NameError on every call.
    res = self.predict([input], model_output_config)
    return res
def predict(self, inputs, output_config=None):
    """ predict with multiple images

    Args:
      inputs: a list of Image objects
      output_config: output_config for more prediction parameters

    Returns:
      the prediction of the model in JSON format
    """
    return self.api.predict_workflow(self.wf_id, inputs, output_config)
class WorkflowNode(object):
    """ the node in the workflow
    """

    def __init__(self, wf_node):
        # pull the nested model description out once for readability
        model_info = wf_node['model']
        self.node_id = wf_node['id']
        self.model_id = model_info['id']
        self.model_version_id = model_info['model_version']['id']

    def dict(self):
        """Serialize this node back into its API dict representation."""
        return {
            'id': self.node_id,
            'model': {
                'id': self.model_id,
                'model_version': {
                    'id': self.model_version_id,
                },
            },
        }
class Workflows(object):
    """Collection-level operations over the application's workflows."""

    def __init__(self, api):
        self.api = api

    def get_all(self, public_only=False):
        """ get all workflows in the application

        Args:
          public_only: whether to get public workflow

        Returns:
          a generator that yields Workflow object

        Examples:
          >>> for workflow in app.workflows.get_all():
          >>>     print workflow.id
        """
        res = self.api.get_workflows(public_only)
        # FIXME(robert): hack to correct the empty workflow
        if not res.get('workflows'):
            res['workflows'] = []
        for one in res['workflows']:
            yield Workflow(self.api, one)

    def get_by_page(self, public_only=False, page=1, per_page=20):
        """ get paginated workflows from the application

        When the number of workflows get high, you may want to get
        the paginated results from all the models

        Args:
          public_only: whether to get public workflow
          page: page number
          per_page: number of models returned in one page

        Returns:
          a list of Workflow objects

        Examples:
          >>> workflows = app.workflows.get_by_page(2, 20)
        """
        # NOTE(review): page/per_page are accepted but never forwarded to the
        # API call in the original implementation; behavior kept as-is.
        payload = self.api.get_workflows(public_only)
        return [Workflow(self.api, entry) for entry in payload['workflows']]

    def get(self, workflow_id):
        """ get workflow by id

        Args:
          workflow_id: ID of the workflow

        Returns:
          A Workflow object or None

        Examples:
          >>> workflow = app.workflows.get('General')
        """
        payload = self.api.get_workflow(workflow_id)
        return Workflow(self.api, payload['workflow'])
class Models(object):
def __init__(self, api, skip_model_cache=False):
    """Wrap the low-level api; optionally pre-warm the model-name cache."""
    self.api = api
    if skip_model_cache:
        return
    # cache of (model name, type) -> model id, so predicting by model name
    # does not require an extra model query every time
    self.model_id_cache = self.init_model_cache()
def init_model_cache(self):
    """ Initialize the model cache for the public models

    This will go through all public models and cache them

    Returns:
      JSON object containing the name, type, and id of all cached models
    """
    model_cache = {}
    # this is a generator, will NOT raise Exception until iterated
    models = self.get_all(public_only=True)
    try:
        for m in models:
            # single lookup instead of the previous check-then-index pair
            model_type = m.output_info.get('type')
            if not model_type:
                continue
            model_name = m.model_name
            model_cache[(model_name, model_type)] = m.model_id
            # for general-v1.3 concept model, make an extra cache entry
            # so it also resolves when no type is given
            if model_name == 'general-v1.3' and model_type == 'concept':
                model_cache[(model_name, None)] = m.model_id
    except ApiError as e:
        if e.error_code == 11007:
            # not authorized: leave the cache partially filled
            # (removed dead `models = []` / `pass` statements)
            logger.debug("not authorized to call GET /models. Unable to cache models")
        else:
            # bare raise preserves the original traceback (was `raise e`)
            raise
    return model_cache
def clear_model_cache(self):
    """Reset the model_name -> model_id cache.

    WARNING: internal function; users should not call this.

    The cache saves the name-to-ID mapping the first time models.get() is
    called by name, so later lookups skip the extra API query. Clearing it
    forces every name to be re-resolved.
    """
    self.model_id_cache = {}
def create(self, model_id, model_name=None, concepts=None, concepts_mutually_exclusive=False,
           closed_environment=False, hyper_parameters=None):
    """ Create a new model

    Args:
      model_id: ID of the model
      model_name: optional name of the model
      concepts: optional concepts to be associated with this model
      concepts_mutually_exclusive: True or False, whether concepts are mutually exclusive
      closed_environment: True or False, whether to use negatives for prediction
      hyper_parameters: hyper parameters for the model, with a json object

    Returns:
      Model object

    Raises:
      UserError: if the server reports a failure status or returns an
        unrecognized response.

    Examples:
      >>> # create a model with no concepts
      >>> app.models.create('my_model1')
      >>> # create a model with a few concepts
      >>> app.models.create('my_model2', concepts=['bird', 'fish'])
      >>> # create a model with closed environment
      >>> app.models.create('my_model3', closed_environment=True)
    """
    if not model_name:
        model_name = model_id
    res = self.api.create_model(model_id, model_name, concepts, concepts_mutually_exclusive,
                                closed_environment, hyper_parameters)
    if res.get('model'):
        return self._to_obj(res['model'])
    if res.get('status'):
        status = res['status']
        raise UserError('code: %d, desc: %s, details: %s' %
                        (status['code'], status['description'], status['details']))
    # BUG FIX: a response with neither 'model' nor 'status' previously fell
    # through to `return model` with `model` unbound, raising NameError.
    raise UserError('unexpected create_model response: missing "model" and "status"')
def _is_public(self, model):
    """ use app_id to determine whether it is a public model

    For public model, the app_id is either '' or 'main'
    For private model, the app_id is not empty but not 'main'
    """
    return model.app_id in ('', 'main')
def get_all(self, public_only=False, private_only=False):
    """ Get all models in the application

    Args:
      public_only: only yield public models
      private_only: only yield private models that tie to your own account

    Returns:
      a generator function that yields Model objects

    Examples:
      >>> for model in app.models.get_all():
      >>>     print model.model_name
    """
    page = 1
    per_page = 20
    while True:
        res = self.api.get_models(page, per_page)
        batch = res['models']
        if not batch:
            # an empty page means we are past the last model
            break
        for raw in batch:
            model = self._to_obj(raw)
            is_pub = self._is_public(model)
            if public_only is True and not is_pub:
                continue
            if private_only is True and is_pub:
                continue
            yield model
        page += 1
def get_by_page(self, public_only=False, private_only=False, page=1, per_page=20):
    """ get paginated models from the application

    When the number of models gets high, you may want to get
    the paginated results from all the models

    Args:
      public_only: only yield public models
      private_only: only yield private models that tie to your own account
      page: page number
      per_page: number of models returned in one page

    Returns:
      a list of Model objects

    Examples:
      >>> models = app.models.get_by_page(2, 20)
    """
    res = self.api.get_models(page, per_page)
    results = [self._to_obj(one) for one in res['models']]
    # BUG FIX: on Python 3 `filter()` returns a lazy iterator, so callers
    # who expect the documented list (indexing, len, re-iteration) would
    # break; materialize with a list comprehension instead.
    if public_only is True:
        results = [m for m in results if self._is_public(m)]
    elif private_only is True:
        results = [m for m in results if not self._is_public(m)]
    return results
def delete(self, model_id, version_id=None):
""" delete the model, or a specific version of the model
Without model version id specified, all the versions associated with this model
will be deleted as well.
With model version id specified, it will delete a
particular model version from the model
Args:
model_id: the unique ID of the model
version_id: the unique ID of the model version
Returns:
the raw JSON response from the server
Examples:
>>> # delete a model
>>> app.models.delete('model_id1')
>>> # delete a model version
>>> app.models.delete('model_id1', version_id='version1')
"""
if version_id is None:
res = self.api.delete_model(model_id)
else:
res = self.api.delete_model_version(model_id, version_id)
return res
def bulk_delete(self, model_ids):
""" Delete multiple models.
Args:
model_ids: a list of unique IDs of the models to delete
Returns:
the raw JSON response from the server
Examples:
>>> app.models.delete_models(['model_id1', 'model_id2'])
"""
res = self.api.delete_models(model_ids)
return res
def delete_all(self):
""" Delete all models and the versions associated with each one
After this operation, you will have no models in the
| |
<filename>venus/db/yasdl/ast.py
"""Abstract Syntax Tree for YASDL."""
from typing import List, Union
import venus.i18n
_ = venus.i18n.get_my_translator(__file__)
# noinspection PyPep8Naming
class dotted_name(str):
    """This is a special string type that represents a dotted name.

    The imp attribute is set if the name is an imp_name.
    The absolute attribute is set if the dotted name is absolute
    (e.g. starts with the "schema" keyword.)
    The min_classes attribute is set if the dotted name has
    a min_classes specification. In that case, it should be a set
    of ast.YASDLItem subclasses.
    The ref attribute should point to the statically bound definition.
    When not bound, it should be None.
    """

    def __init__(self, *args, **kwargs):
        # str is immutable: its value was already set in __new__. The
        # previous ``str.__init__(*args, **kwargs)`` call omitted self and
        # only worked by accident; call super().__init__() properly.
        super().__init__()
        self.imp = False          # True when the name is an imp_name
        self.absolute = False     # True when the name starts with "schema"
        self.min_classes = None   # optional set of YASDLItem subclasses
        self.ref = None           # statically bound definition, or None
        self.refpath = None
        self.lineno = None        # source position, set by the parser
        self.colno = None
        self.owner_schema = None  # schema that owns this name

    def items(self):
        """Return the list of components that make up the dotted name."""
        return self.split(".")

    def get_source_line(self):
        """Get source line code for the dotted name."""
        return self.owner_schema.get_source_line_of(self)
#
# This would allow us to store debug information in STRING literals. Also see comments in lex.py
# But we don't do this because it is not elegant. Also for singletons like True or None this method would not work.
#
# class token_str(str):
# """This is a special string that knows where it came from.
#
# The yacc parser assigns the lineno and colno attributes of this object when a string token is created
# from a string literal in the source code."""
# def __init__(self, *args, **kwargs):
# str.__init__(*args, **kwargs)
# self.lineno = None
# self.colno = None
# self.owner_schema = None
#
# def get_source_line(self):
# """Get source line code for the dotted name."""
# return self.owner_schema.get_source_line_of(self)
def is_minclass(obj, min_classes=None):
    """Return whether *obj* is an instance of any of the classes listed.

    :param obj: The object to be examined.
    :param min_classes: An iterable of classes, or None.
    :return: Passing an empty list will always return False. Passing None
        will always return True. E.g. None means "no restriction", and an
        empty list means "do not accept anything".
    """
    if min_classes is None:
        return True
    # any() with a generator short-circuits, unlike the previous
    # ``True in [list comprehension]`` which always built the full list.
    return any(isinstance(obj, cls) for cls in min_classes)
class YASDLSymbol:
    """Represents a special symbol."""

    def __init__(self):
        # BUGFIX: this was spelled ``__init___`` (three trailing
        # underscores), so it was never called by the constructor and
        # instances lacked the lineno/colno attributes entirely.
        # Source position; expected to be filled in later by the parser.
        self.lineno = None
        self.colno = None
class YASDLAll(YASDLSymbol):
    """Represents the ALL symbol."""
class YASDLItem:
"""Base class for AST elements.
AST elements have a name, zero or more modifiers and zero or more
owner items. This forms an ownership tree. Owned items can be
YASDLItem instances, string literals (unicode), integers, floats etc.
"""
    def __init__(self, name, items=None):
        """Create an AST item.

        :param name: Name of the item.
        :param items: Optional list of owned items (YASDLItem instances,
            string literals, numbers etc.). A falsy value (None, empty
            list) results in a fresh empty list.
        """
        # Name of the item
        self.name = name
        # Owned items; a fresh list is created for falsy arguments, which
        # also avoids sharing a mutable default between instances.
        if items:
            self.items = items
        else:
            self.items = []
        self.lineno = -1  # Will be set by yacc
        self.colno = -1  # Will be set by yacc
        self._hash = None  # Will be set by parser
        self.owner = None  # Will be setup later with setup_owners
        # These below will be set later by the compiler
        self.modifiers = []
        self.ancestors = []
        self.descendants = set([])
        self.specifications = set([])
        self.implementations = set([])
        # This will be set by _cache_static_names() later.
        self._snc = {}
        # These will be set by _cache_members() later:
        # _mbn maps member name -> member, members is the ordered list.
        self._mbn, self.members = {}, []
        self._members_cached = False
        # Deletion bookkeeping, filled in by _cache_members().
        self.unused_deletions = None
        self.deletions = None
def __iter__(self):
"""Iterate over subitems.
These subitems are all items in the ownership tree, given by the
object. Traversal is depth first.
Please note that the object itself is NOT returned!
See also: iterate(), members, itercontained()
"""
for item in self.items:
if isinstance(item, YASDLItem):
for subitem in item:
yield subitem
yield item
def iterate(self, min_classes=None):
"""Iterate over subitems.
These subitems are all items in the ownership tree, given by the object. Traversal is depth first.
:param min_classes: A list of acceptable classes to return. When None, items of all classes are returned.
This method does NOT yield any inherited members. Unlike __iter__, this method can return itself!
See also: itercontained(), has_member(), contains(), owns()
See also: items, members
"""
for item in self:
if is_minclass(item, min_classes):
yield item
if is_minclass(self, min_classes):
yield self
def owns(self, item):
"""Item is owned the called object, directly or indirectly.
:param item: The item to be tested.
This corresponds to the statical containment in the YASDL source code.
This is NOT a real dynamic containment check, since contained
items can be defined in ancestors or implementors that are
not statically contained.
Please note that obj.owns(obj) returns False!
See also: itercontained(), has_member(), contains(), owns()
See also: items, members
"""
for subitem in iter(self):
if subitem is item:
return True
return False
    def _cache_members(self):
        """Cache members and set the members attribute.

        Should not be called manually. Should be called by the compiler.

        Members are the YASDLItem instances that are specified directly
        statically, OR inherited from ancestors. The order of members is
        important. First the members defined statically in the definition are
        listed. Then members defined by the first listed ancestor are listed.
        Then members of the second listed ancestor are listed. Etc.

        Each member has a name, and every name is defined only once.
        E.g. if "displaylabel" is defined in the object, then it is not
        inherited. If "type" is defined in the second ancestor, then it is
        not inherited from the third and fourth ancestors etc.

        You should only use this method after successful call to
        compiler.compile().

        Please note that this method does NOT traverse over the ownership tree!
        For that, see iterate(). Please also note that this method only
        iterates over the direct members. If you also want to iterate over
        members of members etc. recursively, then use itercontained().
        """
        if self._members_cached:
            return
        # Collect the names removed by YASDLDeletion items in this definition.
        self.deletions = set([])
        for item in self.items:
            if isinstance(item, YASDLDeletion):
                self.deletions.add(item.name)
        used_deletions = set([])
        self._mbn, self.members = {}, []  # name -> member map; ordered list
        # Recursive step: inherit members from ancestors.
        if hasattr(self, 'ancestors'):
            for ancestor in self.ancestors:
                # noinspection PyProtectedMember
                ancestor._cache_members()
                for inherited_member in ancestor.members:
                    if isinstance(inherited_member, YASDLItem):
                        # "implements"/"ancestors" are bookkeeping entries,
                        # never inherited as members.
                        if (inherited_member.name != 'implements') and \
                                (inherited_member.name != 'ancestors'):
                            if inherited_member.name in self.deletions:
                                # Name deleted here: suppress inheritance.
                                used_deletions.add(inherited_member.name)
                            elif inherited_member.name in self._mbn:
                                # A later ancestor redefines the name:
                                # replace in place, keeping the original
                                # position in the ordered member list.
                                old = self._mbn[inherited_member.name]
                                idx = self.members.index(old)
                                self._mbn[inherited_member.name] = \
                                    inherited_member
                                self.members[idx] = inherited_member
                            else:
                                self._mbn[inherited_member.name] = \
                                    inherited_member
                                self.members.append(inherited_member)
        # Normal step: our statically defined names.
        for item in self.items:
            # Follow the binding to the final implementor, when present.
            item = getattr(item, 'final_implementor', item)
            if isinstance(item, YASDLItem) and \
                    not isinstance(item, YASDLDeletion):
                if item.name in self._mbn:
                    # A static definition overrides an inherited member,
                    # again keeping the inherited member's position.
                    old = self._mbn[item.name]
                    idx = self.members.index(old)  # TODO: might cache the index too, to speed up replacement!
                    self._mbn[item.name] = item
                    self.members[idx] = item
                else:
                    self._mbn[item.name] = item
                    self.members.append(item)
        # Deletions that never matched an inherited name.
        self.unused_deletions = self.deletions - used_deletions
        self._members_cached = True
def has_member(self, name, min_classes=None):
"""Tells if there is a member with the given name.
:param name: Name of the member to search for.
:param min_classes: List of min_classes, or None.
See also: itercontained(), has_member(), contains(), owns()
See also: items, members
"""
if name in self._mbn:
member = self._mbn[name]
if is_minclass(member, min_classes):
return True
return False
    def __contains__(self, name):
        """Very similar to has_member, but here you cannot specify min_classes."""
        # Plain key lookup in the cached name -> member map.
        return name in self._mbn
    def __getitem__(self, name):
        """Return the member with the given name; raises KeyError when missing."""
        return self._mbn[name]
def itercontained(self, min_classes=None):
"""Similar to members, but it traverses through all
contained definitions, and lists all members of all submembers
recursively, in the right order.
:param min_classes: An iterable of classes. When given, only contained items of the given subclasses will
be yielded.
Yielded values are non-empty lists. These lists contain the
paths that can be used to access the members. To get the member
itself, use the last item of the list.
The most useful way to use this generator is to iterate over
all field definitions in a fieldset, and contained fieldsets,
in the right order.
Please note that this method does NOT return self!
See also: itercontained(), has_member(), contains(), owns()
See also: items, members
"""
for member in self.members:
if is_minclass(member, min_classes):
yield [member]
# if isinstance(member, YASDLFieldSet): ???
for submember_path in member.itercontained(min_classes):
submember_path.insert(0, member)
yield submember_path
def contains(self, item):
"""Tells if the given item is contained within.
:param item: Item to look for
This goes over members. For static containment, use the | |
"""
Tools for generating heatmaps from latitude and longitude data.
MIT License
Original work Copyright (c) 2018 <NAME>
Modified work Copyright 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# imports
import os
import glob
import time
import numpy as np
import matplotlib.pyplot as plt
from urllib.error import URLError
from urllib.request import Request, urlopen
from .parse_fit import get_dataframes
# globals
# Maximum heatmap size in pixels: half of 4K (2160, 3840) to keep memory
# use and tile downloads manageable. (The previous code assigned the full
# 4K tuple first and immediately shadowed it with this value.)
HEATMAP_MAX_SIZE = (2160 // 2, 3840 // 2)
HEATMAP_MARGIN_SIZE = 32  # margin around heatmap trackpoints in pixel
PLT_COLORMAP = 'hot'  # matplotlib color map
OSM_TILE_SERVER = 'https://maps.wikimedia.org/osm-intl/{}/{}/{}.png'  # OSM tile url from https://wiki.openstreetmap.org/wiki/Tile_servers
OSM_TILE_SIZE = 256  # OSM tile size in pixel
OSM_MAX_ZOOM = 19  # OSM maximum zoom level
OSM_MAX_TILE_COUNT = 100  # maximum number of tiles to download
METERS_PER_TRACKPOINT = 5.0  # Running approximation. Cycling ~= 5.0
# functions
def deg2xy(lat_deg, lon_deg, zoom):
    """Returns OSM coordinates (x,y) from (lat,lon) in degree"""
    # from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
    tiles = 2.0**zoom
    x = (lon_deg+180.0)/360.0*tiles
    lat_rad = np.radians(lat_deg)
    y = (1.0-np.arcsinh(np.tan(lat_rad))/np.pi)/2.0*tiles
    return x, y
def xy2deg(x, y, zoom):
    """Returns (lat, lon) in degree from OSM coordinates (x,y)"""
    # from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
    tiles = 2.0**zoom
    lon_deg = x/tiles*360.0-180.0
    lat_deg = np.degrees(np.arctan(np.sinh(np.pi*(1.0-2.0*y/tiles))))
    return lat_deg, lon_deg
def gaussian_filter(image, sigma):
    """Return *image* convolved with a Gaussian kernel of std dev *sigma*.

    The convolution is done in the frequency domain (FFT product), so it
    is circular: values wrap around the image edges.

    :param image: 2D numpy array
    :param sigma: standard deviation of the Gaussian, in pixels
    """
    # Pixel index grids covering the whole image, for building the kernel.
    i, j = np.meshgrid(np.arange(image.shape[0]),
                       np.arange(image.shape[1]),
                       indexing='ij')
    # Kernel center at the image midpoint.
    mu = (int(image.shape[0]/2.0),
          int(image.shape[1]/2.0))
    # 2D normal density centered at mu.
    gaussian = 1.0/(2.0*np.pi*sigma*sigma)*np.exp(-0.5*(((i-mu[0])/sigma)**2+\
                                                        ((j-mu[1])/sigma)**2))
    # Shift the kernel's center to index (0, 0) so the FFT convolution
    # does not translate the image.
    gaussian = np.roll(gaussian, (-mu[0], -mu[1]), axis=(0, 1))
    # Convolution theorem: multiply the spectra, then invert the transform.
    image_fft = np.fft.rfft2(image)
    gaussian_fft = np.fft.rfft2(gaussian)
    image = np.fft.irfft2(image_fft*gaussian_fft)
    return image
def download_tile(tile_url, tile_file):
    """Download one tile from *tile_url* into *tile_file*.

    Sends a browser-like User-Agent (some tile servers reject the default
    urllib agent), waits 0.1s to rate-limit requests, and returns True on
    success or False when the download failed.
    """
    request = Request(tile_url, headers={'User-Agent': 'Mozilla/5.0'})
    try:
        with urlopen(request) as response:
            payload = response.read()
    except URLError:
        return False
    with open(tile_file, 'wb') as out:
        out.write(payload)
    time.sleep(0.1)
    return True
def add_text(img, text, coords=(0.02, 0.95), fontsize=0.03, fill=(0, 0, 0)):
    """
    Add *text* onto an image given as a float numpy.ndarray in [0, 1].

    :param img: image array with float values in [0, 1]
    :param text: text to draw
    :param coords: (x, y) text anchor in relative [0, 1] image coordinates
    :param fontsize: font height as a fraction of the image height
    :param fill: fill color for the text
    :return: numpy array of the image with the text drawn

    NOTE(review): requires Pillow and a 'fonts/FreeMonoBold.ttf' file
    relative to the working directory — TODO confirm the font path.
    """
    from PIL import Image, ImageDraw, ImageFont
    # BUGFIX: the previous ``img *= 255`` scaled the caller's array in
    # place; scale a copy instead so the input is left untouched.
    pil_img = Image.fromarray((img * 255).astype(np.uint8))
    width, height = pil_img.size
    # Call draw Method to add 2D graphics in an image
    draw = ImageDraw.Draw(pil_img)
    # Custom font style and font size
    font = ImageFont.truetype('fonts/FreeMonoBold.ttf', int(fontsize*height))
    # Convert relative anchor coordinates to pixels and add the text.
    pixel_coords = (
        int(coords[0] * width),
        int(coords[1] * height)
    )
    draw.text(pixel_coords, text, font=font, fill=fill)
    return np.array(pil_img)
def get_lat_lon(args):
    """Read trackpoints from all FIT files matching args.dir/args.filter.

    :param args: needs args.dir, args.filter and args.bounds
        (lat_min, lat_max, lon_min, lon_max crop box)
    :return: (lat_lon_list, dates, bounds) where lat_lon_list is a list of
        (N, 2) arrays (one per file with data inside the crop box), dates
        are the files' formatted start dates, and bounds is
        [lat_min, lat_max, lon_min, lon_max] of the retained data.
    """
    fit_files = sorted(glob.glob('{}/{}'.format(args.dir, args.filter)))
    if not fit_files:
        exit('ERROR no data matching {}/{}'.format(args.dir, args.filter))
    lat_lon_list = []
    dates = []
    lat_bound_min, lat_bound_max, lon_bound_min, lon_bound_max = args.bounds
    for fit_file in fit_files:
        print('Reading {}'.format(os.path.basename(fit_file)))
        _, points_df = get_dataframes(fit_file)
        lat_lon = points_df[['latitude', 'longitude']].to_numpy()
        # crop to bounding box
        lat_lon = lat_lon[np.logical_and(lat_lon[:, 0] > lat_bound_min,
                                         lat_lon[:, 0] < lat_bound_max), :]
        lat_lon = lat_lon[np.logical_and(lat_lon[:, 1] > lon_bound_min,
                                         lat_lon[:, 1] < lon_bound_max), :]
        if lat_lon.shape[0] > 0:
            # Keep only files that have at least one point inside the box.
            dates.append(points_df['timestamp'].loc[0].strftime('%b %d, %Y'))
            lat_lon_list.append(lat_lon)
    # BUGFIX: guard BEFORE vstack — np.vstack raises on an empty list, so
    # the previous post-vstack ``size == 0`` checks were unreachable.
    if not lat_lon_list:
        exit('ERROR no data matching {}/{} with bounds {}'.format(args.dir, args.filter, args.bounds))
    lat_lon_data = np.vstack(lat_lon_list)
    print(f'Processing {lat_lon_data.shape[0]} coordinates.')
    print('Read {} trackpoints'.format(lat_lon_data.shape[0]))
    # find tiles coordinates: crop to the extent of the retained data
    lat_min, lon_min = np.min(lat_lon_data, axis=0)
    lat_max, lon_max = np.max(lat_lon_data, axis=0)
    bounds = [lat_min, lat_max, lon_min, lon_max]
    return lat_lon_list, dates, bounds
def get_background_map(bounds, args):
    """Assemble the OSM background mosaic ("supertile") covering *bounds*.

    :param bounds: [lat_min, lat_max, lon_min, lon_max] of the data
    :param args: needs args.zoom (fixed zoom level, or -1 for auto) and
        args.orange (when falsy, the map is converted to inverted grayscale)
    :return: (supertile, zoom, x_tile_min, y_tile_min) where supertile is
        an (H, W, 3) float array built from the downloaded tiles

    Side effects: creates a ``tiles/`` cache directory and downloads any
    missing tiles into it; exits when too many tiles would be needed.
    """
    lat_min, lat_max, lon_min, lon_max = bounds
    if args.zoom > -1:
        # Fixed zoom requested; clamp to the OSM maximum.
        zoom = min(args.zoom, OSM_MAX_ZOOM)
        x_tile_min, y_tile_max = map(int, deg2xy(lat_min, lon_min, zoom))
        x_tile_max, y_tile_min = map(int, deg2xy(lat_max, lon_max, zoom))
    else:
        # Auto zoom: start at the maximum and decrease until the mosaic
        # fits within HEATMAP_MAX_SIZE.
        zoom = OSM_MAX_ZOOM
        while True:
            x_tile_min, y_tile_max = map(int, deg2xy(lat_min, lon_min, zoom))
            x_tile_max, y_tile_min = map(int, deg2xy(lat_max, lon_max, zoom))
            if ((x_tile_max-x_tile_min+1)*OSM_TILE_SIZE <= HEATMAP_MAX_SIZE[0] and
                    (y_tile_max-y_tile_min+1)*OSM_TILE_SIZE <= HEATMAP_MAX_SIZE[1]):
                break
            zoom -= 1
        print('Auto zoom = {}'.format(zoom))
    tile_count = (x_tile_max-x_tile_min+1)*(y_tile_max-y_tile_min+1)
    if tile_count > OSM_MAX_TILE_COUNT:
        exit('ERROR zoom value too high, too many tiles to download')
    # download tiles (cached on disk under tiles/)
    os.makedirs('tiles', exist_ok=True)
    supertile = np.zeros(((y_tile_max-y_tile_min+1)*OSM_TILE_SIZE,
                          (x_tile_max-x_tile_min+1)*OSM_TILE_SIZE, 3))
    n = 0
    for x in range(x_tile_min, x_tile_max+1):
        for y in range(y_tile_min, y_tile_max+1):
            n += 1
            tile_file = 'tiles/tile_{}_{}_{}.png'.format(zoom, x, y)
            if not glob.glob(tile_file):
                print('downloading tile {}/{}'.format(n, tile_count))
                tile_url = OSM_TILE_SERVER.format(zoom, x, y)
                if not download_tile(tile_url, tile_file):
                    print('ERROR downloading tile {} failed, using blank tile'.format(tile_url))
                    # Fall back to a white tile so the mosaic stays complete.
                    tile = np.ones((OSM_TILE_SIZE,
                                    OSM_TILE_SIZE, 3))
                    plt.imsave(tile_file, tile)
            tile = plt.imread(tile_file)
            i = y-y_tile_min
            j = x-x_tile_min
            # Paste the tile into the mosaic, dropping any alpha channel.
            supertile[i*OSM_TILE_SIZE:(i+1)*OSM_TILE_SIZE,
                      j*OSM_TILE_SIZE:(j+1)*OSM_TILE_SIZE, :] = tile[:, :, :3]
    if not args.orange:
        supertile = np.sum(supertile*[0.2126, 0.7152, 0.0722], axis=2)  # to grayscale (Rec. 709 luma weights)
        supertile = 1.0-supertile  # invert colors
        supertile = np.dstack((supertile, supertile, supertile))  # to rgb
    return supertile, zoom, x_tile_min, y_tile_min
def get_trackpoints(lat_lon_list, shape, ij_data, args):
    """Accumulate one trackpoint density map per activity.

    :param lat_lon_list: list of per-activity coordinate arrays (only its
        length is used, to size the output)
    :param shape: (H, W) of each density map
    :param ij_data: list (one entry per activity) of (i, j) pixel
        coordinates for the activity's trackpoints
    :param args: needs args.sigma, the accumulation radius in pixels
    :return: float array of shape (len(lat_lon_list), H, W) counting, per
        pixel, how many trackpoints fell within sigma of it
    """
    trackpoint_list = np.zeros((len(lat_lon_list), *shape))
    sigma_pixel = args.sigma  # if not args.orange else 1
    # Count number of routes through pixels, with sigma radius.
    for idx, ij in enumerate(ij_data):
        for i, j in ij:
            # BUGFIX: clamp the window start at 0. A negative slice start
            # (trackpoint within sigma_pixel of the top/left edge) would
            # wrap around to the far side of the array.
            i0 = max(i - sigma_pixel, 0)
            j0 = max(j - sigma_pixel, 0)
            trackpoint_list[idx, i0:i + sigma_pixel, j0:j + sigma_pixel] += 1.0
    return trackpoint_list
def smooth_trackpoints(data, m, data_hist, args):
    """Clip, histogram-equalize and blur a trackpoint density map.

    :param data: 2D density array (the clip step modifies it in place)
    :param m: maximum accumulation threshold
    :param data_hist: normalized cumulative histogram with int(m)+1 bins
    :param args: needs args.sigma, the blur radius in pixels
    :return: smoothed array (kernel density estimate with normal kernel)
    """
    sigma_pixel = args.sigma  # if not args.orange else 1
    # Threshold to the maximum accumulation of trackpoints.
    data[data > m] = m
    # Histogram equalization, vectorized: fancy indexing replaces the
    # previous per-pixel Python double loop. astype(int) truncates just
    # like the original int() call did.
    data = m * data_hist[data.astype(int)]
    # Kernel density estimation with a normal kernel.
    data = gaussian_filter(data, float(sigma_pixel))
    return data
def create_heatmap(supertile, data, ij_data, args):
    """Blend the normalized density *data* over the background *supertile*
    and crop the result to the trackpoint extent plus a margin.

    :param supertile: (H, W, 3) float background map (modified in place)
    :param data: (H, W) float density, expected in [0, 1]
    :param ij_data: (N, 2) array of trackpoint pixel coordinates, used for
        the final crop
    :param args: needs args.orange (blend mode switch) and args.alpha
        (background dimming factor in the orange mode)
    :return: cropped (h, w, 3) float heatmap image
    """
    # colorize
    if not args.orange:
        cmap = plt.get_cmap(PLT_COLORMAP)
        data_color = cmap(data)
        data_color[data_color == cmap(0.0)] = 0.0  # remove background color
        # Composite the colormapped density over the background per channel.
        for c in range(3):
            supertile[:, :, c] = (1.0-data_color[:, :, c])*supertile[:, :, c]+data_color[:, :, c]
    else:
        cmap = plt.get_cmap(PLT_COLORMAP)
        data_color = cmap(data)
        data_color[data_color == cmap(0.0)] = 0  # remove background color
        # Blend with the background dimmed by args.alpha.
        for c in range(3):
            supertile[:, :, c] = (1.0-data) * args.alpha*supertile[:, :, c] + data * data_color[:, :, c]
    # Earlier blending experiments, kept for reference:
    # color = np.array([255, 82, 0], dtype=float)/255 # orange
    # for c in range(3):
    #     supertile[:, :, c] = np.minimum(supertile[:, :, c]+gaussian_filter(data, 1.0), 1.0) # white
    # data = gaussian_filter(data, 0.5) # original given sigma 0.5
    # data = (data-data.min())/(data.max()-data.min())
    # for c in range(3):
    #     supertile[:, :, c] = (1.0-data)*supertile[:, :, c]+data*color[c]
    # Crop to the trackpoint bounding box plus HEATMAP_MARGIN_SIZE,
    # clamped to the image edges.
    i_min, j_min = np.min(ij_data, axis=0)
    i_max, j_max = np.max(ij_data, axis=0)
    supertile = supertile[max(i_min-HEATMAP_MARGIN_SIZE, 0):min(i_max+HEATMAP_MARGIN_SIZE, supertile.shape[0]),
                          max(j_min-HEATMAP_MARGIN_SIZE, 0):min(j_max+HEATMAP_MARGIN_SIZE, supertile.shape[1])]
    return supertile
def generate_heatmap(args):
# Get latitude and longitude data
lat_lon_list, dates, bounds = get_lat_lon(args)
lat_lon_data = np.vstack(lat_lon_list)
# Generate the background map at the zoom level
supertile, zoom, x_tile_min, y_tile_min = get_background_map(bounds, args)
# Create the trackpoint data map
shape = supertile.shape[:2]
xy_data = [np.array(deg2xy(lat_lon[:, 0], lat_lon[:, 1], zoom)).T for lat_lon in lat_lon_list]
xy_data = [np.round((xy-[x_tile_min, y_tile_min])*OSM_TILE_SIZE) for xy in xy_data]
ij_data = [np.flip(xy.astype(int), axis=1) for xy in xy_data] # to supertile coordinates
trackpoint_list = get_trackpoints(lat_lon_list, shape, ij_data, args)
res_pixel = 156543.03*np.cos(np.radians(np.mean(np.vstack(lat_lon_list)[:, 0])))/(2.0**zoom) # from https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
m = np.round((1.0/METERS_PER_TRACKPOINT)*res_pixel*len(trackpoint_list))
data = np.sum(trackpoint_list, axis=0)
# threshold to max accumulation of trackpoint
data[data > m] = m
# equalize histogram and compute kernel density estimation
data_hist, _ = np.histogram(data, bins=int(m+1))
data_hist = np.cumsum(data_hist)/data.size # normalized cumulated histogram
data = smooth_trackpoints(data, m, data_hist, args)
data = smooth_trackpoints(np.sum(trackpoint_list, axis=0), m, data_hist, args)
data = (data-data.min())/(data.max()-data.min()) # normalize to [0,1]
img = create_heatmap(supertile, data, np.vstack(ij_data), args)
img = add_text(img, f'{dates[0]} - {dates[-1]}')
# save image
plt.imsave('{}.png'.format(os.path.splitext(args.output)[0]), img)
print('Saved {}.png'.format(os.path.splitext(args.output)[0]))
# save csv
if args.csv and not args.orange:
csv_file = '{}.csv'.format(os.path.splitext(args.output)[0])
with open(csv_file, 'w') as file:
| |
270, 'width': 240},
'N703iD': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N703imyu': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N704imyu': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N705i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N705imyu': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N706i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N706i2': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N706ie': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N900i': { 'color': True, 'depth': 65536, 'height': 269, 'width': 240},
'N900iG': { 'color': True, 'depth': 65536, 'height': 269, 'width': 240},
'N900iL': { 'color': True, 'depth': 65536, 'height': 269, 'width': 240},
'N900iS': { 'color': True, 'depth': 65536, 'height': 269, 'width': 240},
'N901iC': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'N901iS': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'N902i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N902iL': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N902iS': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N902iX': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N903i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'N904i': { 'color': True, 'depth': 262144, 'height': 352, 'width': 240},
'N905i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N905iBiz': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N905imyu': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N906i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N906iL': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'N906imyu': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'NM705i': { 'color': True, 'depth': 16777216, 'height': 235, 'width': 231},
'NM706i': { 'color': True, 'depth': 16777216, 'height': 235, 'width': 231},
'NM850iG': { 'color': True, 'depth': 65536, 'height': 144, 'width': 176},
'P01A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P01B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P01C': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P02A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P02B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P02C': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P03A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P03B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P04A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P04B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P05A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P05B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P06A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P06B': { 'color': True, 'depth': 262144, 'height': 325, 'width': 240},
'P07A': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P07B': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P08A': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P09A': { 'color': True, 'depth': 262144, 'height': 331, 'width': 240},
'P10A': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P2002': { 'color': True, 'depth': 65536, 'height': 128, 'width': 118},
'P2101V': { 'color': True, 'depth': 262144, 'height': 182, 'width': 163},
'P2102V': { 'color': True, 'depth': 262144, 'height': 198, 'width': 176},
'P700i': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P701iD': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P702i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P702iD': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P703i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P703imyu': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P704i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P704imyu': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P705i': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P705iCL': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P705imyu': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P706ie': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P706imyu': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P851i': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P900i': { 'color': True, 'depth': 65536, 'height': 266, 'width': 240},
'P900iV': { 'color': True, 'depth': 262144, 'height': 266, 'width': 240},
'P901i': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P901iS': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P901iTV': { 'color': True, 'depth': 65536, 'height': 270, 'width': 240},
'P902i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P902iS': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P903i': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P903iTV': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P903iX': { 'color': True, 'depth': 262144, 'height': 270, 'width': 240},
'P904i': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P905i': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P905iTV': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'P906i': { 'color': True, 'depth': 262144, 'height': 350, 'width': 240},
'SA700iS': { 'color': True, 'depth': 65536, 'height': 252, 'width': 240},
'SA702i': { 'color': True, 'depth': 65536, 'height': 252, 'width': 240},
'SA800i': { 'color': True, 'depth': 65536, 'height': 252, 'width': 240},
'SH01A': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH01B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH02A': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH02B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH03A': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH03B': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH04A': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH04B': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH05A': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH05B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH06A': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH06B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH07A': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH07B': { 'color': True, 'depth': 16777216, 'height': 328, 'width': 240},
'SH08A': { 'color': True, 'depth': 16777216, 'height': 296, 'width': 240},
'SH08B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH09B': { 'color': True, 'depth': 16777216, 'height': 323, 'width': 240},
'SH2101V': { 'color': True, 'depth': 65536, 'height': 600, 'width': 800},
'SH700i': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH700iS': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH702iD': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH702iS': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH703i': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH704i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH705i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH705i2': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH706i': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH706ie': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH706iw': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH851i': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH900i': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH901iC': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH901iS': { 'color': True, 'depth': 262144, 'height': 252, 'width': 240},
'SH902i': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH902iS': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH902iSL': { 'color': True, 'depth': 262144, 'height': 240, 'width': 240},
'SH903i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH903iTV': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH904i': { 'color': True, 'depth': 262144, 'height': 320, 'width': 240},
'SH905i': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH905iTV': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH906i': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SH906iTV': { 'color': True, 'depth': 16777216, 'height': 320, 'width': 240},
'SO702i': { 'color': True, 'depth': 262144, 'height': 256, 'width': 240},
'SO703i': { 'color': True, 'depth': 262144, 'height': 368, 'width': 240},
'SO704i': { 'color': True, 'depth': 262144, 'height': | |
import abc
import itertools
from typing import Iterable
from .indexes import Index
from .internal import _InternalAccessor
from .merge import MergeExpression
from .operator import (ArithExpression, BooleanExpression, DataFrameLike,
SeriesLike, StatOpsMixin)
from .series import Series
from .utils import (ORCA_INDEX_NAME_FORMAT, _infer_level,
_unsupport_columns_axis, check_key_existence,
dolphindb_numeric_types, get_orca_obj_from_script,
sql_select, to_dolphindb_literal)
def _orca_groupby_op(func, numeric_only):
def gfunc(self):
return self._groupby_op(func, numeric_only)
return gfunc
def _orca_contextby_op(func, numeric_only):
def cfunc(self):
return self._contextby_op(func, numeric_only)
return cfunc
class GroupByOpsMixin(metaclass=abc.ABCMeta):
    """Mixin providing groupby/contextby operators for orca objects.

    Aggregation attributes (``sum``, ``mean``, ...) are generated by
    _orca_groupby_op and collapse each group to one row; the cumulative and
    filling attributes are generated by _orca_contextby_op and map to
    DolphinDB ``context by`` clauses, which keep one row per input row.
    """

    # Aggregations: one output row per group.
    all = _orca_groupby_op("all", numeric_only=False)
    any = _orca_groupby_op("any", numeric_only=False)
    count = _orca_groupby_op("count", numeric_only=False)
    size = _orca_groupby_op("size", numeric_only=False)
    sum = _orca_groupby_op("sum", numeric_only=True)
    sum2 = _orca_groupby_op("sum2", numeric_only=True)
    prod = _orca_groupby_op("prod", numeric_only=True)
    mean = _orca_groupby_op("mean", numeric_only=True)
    median = _orca_groupby_op("median", numeric_only=True)
    min = _orca_groupby_op("min", numeric_only=False)
    max = _orca_groupby_op("max", numeric_only=False)
    std = _orca_groupby_op("std", numeric_only=True)
    var = _orca_groupby_op("var", numeric_only=True)
    sem = _orca_groupby_op("sem", numeric_only=True)
    mad = _orca_groupby_op("mad", numeric_only=True)
    skew = _orca_groupby_op("skew", numeric_only=True)
    kurtosis = _orca_groupby_op("kurtosis", numeric_only=True)
    first = _orca_groupby_op("first", numeric_only=False)
    last = _orca_groupby_op("last", numeric_only=False)
    ohlc = _orca_groupby_op("ohlc", numeric_only=True)

    # Transformations: DolphinDB "context by", shape-preserving.
    ffill = _orca_contextby_op("ffill", numeric_only=False)
    pad = ffill  # pandas-compatible alias
    bfill = _orca_contextby_op("bfill", numeric_only=False)
    backfill = bfill  # pandas-compatible alias
    cumcount = _orca_contextby_op("cumcount", numeric_only=False)
    cummax = _orca_contextby_op("cummax", numeric_only=False)
    cummin = _orca_contextby_op("cummin", numeric_only=False)
    cumprod = _orca_contextby_op("cumprod", numeric_only=True)
    cumsum = _orca_contextby_op("cumsum", numeric_only=True)
    pct_change = _orca_contextby_op("percentChange", numeric_only=True)

    def diff(self, periods=1, axis=0):
        """Per-group first difference (DolphinDB ``deltas``).

        Only periods=1 and axis=0 are supported.
        """
        _unsupport_columns_axis(self, axis)
        if periods != 1:
            raise ValueError("periods must be 1")
        return self._contextby_op("deltas", numeric_only=True)

    # Maps a function name to whether it applies only to numeric columns.
    # Consulted by _generate_groupby_select_list_and_value_list after the
    # pandas name has been aliased to the DolphinDB name.
    # NOTE(review): funcname_alias() maps "pct_change" -> "percentChange",
    # but only "pct_change" is listed here, so the aliased lookup falls back
    # to False -- confirm whether a "percentChange" entry was intended.
    _STRING_TO_NUMERIC_ONLY = {
        "all": False,
        "any": False,
        "count": False,
        "size": False,
        "sum": True,
        "sum2": True,
        "prod": True,
        "mean": True,
        "median": True,
        "min": False,
        "max": False,
        "std": True,
        "var": True,
        "sem": True,
        "med": True,
        "skew": True,
        "kurtosis": True,
        "first": False,
        "last": False,
        "ohlc": True,
        "bfill": False,
        "ffill": False,
        "cumcount": False,
        "cummax": False,
        "cummin": False,
        "cumprod": True,
        "cumsum": True,
        "pct_change": True,
        "diff": True,
    }

    def rank(self, axis=0, method='min', na_option='top', ascending=True, pct=False, rank_from_zero=False, group_num=None):
        """Rank values within each group.

        Argument validation and translation to a DolphinDB rank expression
        is delegated to operator._check_rank_arguments.
        """
        from .operator import _check_rank_arguments
        func = _check_rank_arguments(axis, method, na_option, ascending, pct, rank_from_zero, group_num)
        return self._contextby_op(func, numeric_only=False)

    def ols(self, y, x, column_names, intercept=True):
        """Per-group ordinary least squares via DolphinDB ``ols``.

        y must name exactly one existing column; x may name several.
        """
        y, _ = check_key_existence(y, self._data_columns)
        x, _ = check_key_existence(x, self._data_columns)
        if len(y) != 1:
            raise ValueError("y must be a single column")
        y_script = y[0]
        x_script = ",".join(x)
        # DolphinDB booleans are lowercase literals.
        intercept = "true" if intercept else "false"
        column_names_literal = to_dolphindb_literal(column_names)
        script = f"ols({y_script}, ({x_script}), {intercept}) as {column_names_literal}"
        # Only order the result when the groupby was created with sort=True.
        orderby_list = self._orderby_list if self._sort else None
        script = sql_select([script], self._var_name, self._where_expr,
                            groupby_list=self._groupby_list, orderby_list=orderby_list,
                            asc=self._ascending)
        return self._run_groupby_script("ols", script, self._result_index_map)

    def aggregate(self, func, *args, **kwargs):
        """Aggregate each group; func is a string, list of strings or
        column->string dict naming DolphinDB functions."""
        return self._groupby_op(func, False)

    agg = aggregate  # pandas-compatible alias

    def apply(self, func, *args, **kwargs):
        """Apply a DolphinDB expression (given as a string) to each group."""
        if not isinstance(func, str):
            raise ValueError("Orca does not support callable func; func must be a string representing a DolphinDB function")
        select_list = [func]
        orderby_list = self._orderby_list if self._sort else None
        # A MergeExpression carries its own FROM clause (a join); otherwise
        # select from the table variable directly.
        if isinstance(self._internal, MergeExpression):
            var_name = self._internal._from_clause
        else:
            var_name = self._var_name
        script = sql_select(select_list, var_name, self._where_expr,
                            groupby_list=self._groupby_list, orderby_list=orderby_list,
                            asc=self._ascending)
        return self._run_groupby_script(func, script, self._result_index_map)

    def transform(self, func="", *args, **kwargs):
        """Transform each group with a DolphinDB function, keeping shape."""
        if not isinstance(func, str):
            raise ValueError("Orca does not support callable func; func must be a string representing a DolphinDB function")
        return self._contextby_op(func, False)

    @staticmethod
    def _get_groupby_list_orderby_list_and_index_map(groupby_columns, index_names, sort, resample):
        """Build the group-by/order-by SQL fragments and the index mappings
        used to reconstruct a pandas-style index from the result."""
        # Each group key is aliased to a synthetic index column name.
        index_columns = [ORCA_INDEX_NAME_FORMAT(i) for i in range(len(index_names))]
        groupby_list = [f"{groupby_column} as {index_column}"
                        for groupby_column, index_column in zip(groupby_columns, index_columns)]
        if sort:
            orderby_list = index_columns
        elif resample:
            # Resampling orders only by the last (time-bucket) column.
            orderby_list = index_columns[-1:]
        else:
            orderby_list = None
        index_map = [(index_column, None) if index_name is None
                     else (index_column, (index_name,))
                     for index_name, index_column in zip(index_names, index_columns)]
        contextby_index_map = [(index_column, None) if index_name is None
                               else (index_name, (index_name,))
                               for index_name, index_column in zip(index_names, index_columns)]
        return groupby_list, orderby_list, index_map, contextby_index_map

    def _generate_groupby_select_list_and_value_list(self, func, groupkeys, numeric_only):
        """Translate func (str, list or dict) into SQL select fragments.

        Returns (select_list, value_list); value_list holds the unaliased
        expressions and is only populated for the plain-string case.
        """
        def check_func_existance(func):
            # Unknown function names default to numeric_only=False.
            return self._STRING_TO_NUMERIC_ONLY.get(func, False)
        def ohlc_select_list(select_col, col):
            # ohlc expands one column into four aggregated output columns.
            return [f"first({select_col}) as {col}_open",
                    f"max({select_col}) as {col}_high",
                    f"min({select_col}) as {col}_low",
                    f"last({select_col}) as {col}_close"]
        def funcname_alias(func):
            # pandas name -> DolphinDB name
            ALIAS = {"pad": "ffill", "backfill": "bfill", "pct_change": "percentChange", "diff": "deltas"}
            return ALIAS.get(func, func)
        select_columns = self._get_data_select_list()
        data_columns = self._data_columns
        # special functions
        if func == "size":
            return ["count(*)"], []
        if func == "ohlc":
            column_ohlcs = (ohlc_select_list(select_col, col)
                            for select_col, col in zip(select_columns, data_columns))
            return list(itertools.chain(*column_ohlcs)), []
        if isinstance(func, str):
            func = funcname_alias(func)
            numeric_only = check_func_existance(func)
        elif isinstance(func, list):
            # List of functions: one aliased column per (column, function) pair.
            select_list = []
            func_names = []
            for func_name in func:
                if not isinstance(func_name, str):
                    raise TypeError(f"Only strings are supported to be used as function names")
                func_names.append(funcname_alias(func_name))
            select_list= ([f"{func_name}({col}) as {col}_{func_name}" for func_name in func_names]
                          for col in select_columns if col not in groupkeys)
            select_list = list(itertools.chain(*select_list))
            return select_list, []
        elif isinstance(func, dict):
            # Dict: explicit column -> function mapping.
            select_list = []
            for col, func_name in func.items():
                if not isinstance(func_name, str):
                    raise TypeError(f"Only strings are supported to be used as function names")
                try:
                    col_idx = data_columns.index(col)
                except ValueError:
                    raise KeyError(col)
                func_name = funcname_alias(func_name)
                # check_func_existance(func_name)
                select_col = select_columns[col_idx]
                if func_name == "ohlc":
                    select_list.extend(ohlc_select_list(select_col, col))
                else:
                    select_list.append(f"{func_name}({select_col}) as {col}")
            return select_list, []
        else:
            raise TypeError(f"Only strings are supported to be used as function names")
        # is_op_on_different_columns = False
        if isinstance(self._internal, (ArithExpression, BooleanExpression)):
            # Derived expressions bypass the numeric-only column filter.
            numeric_only = False
        ddb_dtypes = self._ddb_dtypes
        select_list = []
        value_list = []
        for select_col, col in zip(select_columns, data_columns):
            # Skip group keys, and non-numeric columns when numeric_only.
            if (col not in groupkeys
                    and (not numeric_only
                         or ddb_dtypes[col] in dolphindb_numeric_types)):
                select_list.append(f"{func}({select_col}) as {col}")
                value_list.append(f"{func}({select_col})")
        return select_list, value_list

    def _run_groupby_script(self, func, script, groupkeys, is_apply=False):
        """Execute a generated group-by script and wrap the result in an
        orca object, post-processing the special cases of func."""
        groupby_size = (func == "size")
        groupby_having = (func == "")
        session = self._session
        index = groupkeys if self._as_index or groupby_size or groupby_having else []
        if isinstance(func, list):
            # MultiIndex-like columns: one entry per (column, function) pair.
            column_index = ([(col, func_name) for func_name in func]
                            for col in self._data_columns if col not in self._groupkeys)
            column_index = list(itertools.chain(*column_index))
            return get_orca_obj_from_script(session, script, index, column_index=column_index)
        if func == "ohlc":
            column_index = ([(col, "open"), (col, "high"), (col, "low"), (col, "close")] for col in self._data_columns)
            column_index = list(itertools.chain(*column_index))
            return get_orca_obj_from_script(session, script, index, column_index=column_index)
        data = get_orca_obj_from_script(session, script, index)
        if groupby_size:
            # size produces a single unnamed count series.
            s = data["count"]
            s.rename(None, inplace=True)
            return s
        elif is_apply:
            s = data[data._data_columns[0]]
            s.rename(None, inplace=True)
            return s
        elif self._is_series_like:
            # Series groupby: unwrap the single column and restore its name.
            s = data[data._data_columns[0]]
            s.rename(self._name, inplace=True)
            return s
        else:
            return data

    def _get_data_select_list(self):
        # Expressions carry their own select fragments; plain tables just
        # select their data columns.
        internal = self._internal
        if isinstance(internal, (ArithExpression, BooleanExpression, MergeExpression)):
            return internal._get_data_select_list()
        else:
            return self._data_columns

    @abc.abstractmethod
    def _groupby_op(self, func, numeric_only):
        # Declared abstract so subclasses provide the entry point, but a
        # default implementation is supplied for them to delegate to.
        select_list, _ = \
            self._generate_groupby_select_list_and_value_list(func, self._groupkeys, numeric_only)
        if len(select_list) == 0:    # TODO: handle
            raise NotImplementedError()
        orderby_list = self._orderby_list if self._sort else None
        if isinstance(self._internal, MergeExpression):
            var_name = self._internal._from_clause
        else:
            var_name = self._var_name
        script = sql_select(select_list, var_name, self._where_expr,
                            groupby_list=self._groupby_list, orderby_list=orderby_list,
                            asc=self._ascending)
        return self._run_groupby_script(func, script, self._result_index_map)

    @abc.abstractmethod
    def _contextby_op(self, func, numeric_only):    # TODO: context by order
        select_list, value_list = \
            self._generate_groupby_select_list_and_value_list(func, self._groupkeys, numeric_only)
        # Context-by is lazy: return an expression object; the SQL only runs
        # when its compute() is called.
        klass = SeriesContextByExpression if self._is_series_like else DataFrameContextByExpression
        return klass(self._session, self._internal, func, self._where_expr, self._name,
                     select_list, value_list, self._groupby_list)
class ContextByExpression(_InternalAccessor):
    """
    Expression related to DolphinDB context by expressions.

    Lazily represents ``select ... from ... context by ...``; nothing is
    executed until compute() is called.
    """
    def __init__(self, session, internal, func, where_expr, name,
                 select_list, value_list, groupby_list):
        self._session = session            # DolphinDB session to run the script on
        self._internal = internal          # underlying table or expression
        self._func = func                  # DolphinDB function being applied
        self._where_expr = where_expr      # optional filter clause
        self._name = name                  # series name for series-like results
        self._select_list = select_list    # aliased select fragments
        self._value_list = value_list      # unaliased value fragments
        self._groupby_list = groupby_list  # context-by key fragments
        self._as_index = True
    def compute(self):
        """Execute the context-by script and return the resulting orca object."""
        select_list = self._select_list
        if len(select_list) == 0:
            raise NotImplementedError()
        # Keep the index columns alongside the computed values.
        select_list = itertools.chain(self._index_columns, select_list)
        script = sql_select(select_list, self._var_name, self._where_expr,
                            groupby_list=self._groupby_list, is_groupby=False, hint=128)
        # Reuse the mixin's result-wrapping logic, called unbound on self.
        return GroupByOpsMixin._run_groupby_script(self, self._func, script, self._index_map)
    def to_pandas(self):
        """Materialize the expression and convert the result to pandas."""
        return self.compute().to_pandas()
    def _get_data_select_list(self):
        # Unaliased expressions, so this object can be composed further.
        return self._value_list
    def _get_contextby_list(self):
        return self._groupby_list
class DataFrameContextByExpression(DataFrameLike, ContextByExpression):
    """DataFrame-flavoured context-by expression; behavior comes entirely
    from the DataFrameLike and ContextByExpression bases."""
    pass
class SeriesContextByExpression(SeriesLike, ContextByExpression):
    """Series-flavoured context-by expression; behavior comes entirely
    from the SeriesLike and ContextByExpression bases."""
    pass
class GroupBy(_InternalAccessor, GroupByOpsMixin, metaclass=abc.ABCMeta):
def __init__(self, session, internal, index, by, level, as_index, sort, ascending, where_expr, name,
groupkeys=None, groupby_list=None, orderby_list=None, result_index_map=None,
contextby_result_index_map=None):
self._session = session
self._internal = internal
self._index = index
self._as_index = as_index
self._sort = sort
self._ascending = ascending
self._where_expr = where_expr
self._name = name
if (groupkeys is not None and groupby_list is not None
and orderby_list is not None and result_index_map is not None
and contextby_result_index_map is not None):
self._groupkeys = groupkeys
self._groupby_list = groupby_list
self._orderby_list = orderby_list
self._result_index_map = result_index_map
self._contextby_result_index_map = contextby_result_index_map
return
index_names = []
groupkeys = []
if by is None and level is None:
raise TypeError("You have to supply one of 'by' and 'level'")
if level is not None:
groupkeys, _, index_names, _ = _infer_level(level, self._index_map)
else:
for column in by:
if isinstance(column, str):
groupkeys.append(column)
index_names.append(column)
elif isinstance(column, Series):
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Series")
groupkeys.append(column._data_columns[0])
index_names.append(column._name)
elif isinstance(column, Index):
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Index")
groupkeys += column._index_columns
index_names += column._index_columns
elif isinstance(column, (ArithExpression, BooleanExpression)):
if not column._is_series_like:
raise ValueError("Grouper is not 1-dimensional")
if column._var_name != self._var_name:
raise ValueError("Unable to groupby with an external Index")
groupkeys.append(column._get_data_select_list()[0])
index_names.append(column._name)
else:
raise ValueError("Each element in by must | |
import contextlib
import functools
import operator
import sys
import threading
import numpy
import six
import chainer
from chainer.backends import cuda
_thread_local = threading.local()


@contextlib.contextmanager
def get_function_check_context(f):
    """Temporarily record *f* as the function currently under type check.

    The previous value is restored on exit, so these contexts nest safely.
    """
    previous = getattr(_thread_local, 'current_function', None)
    _thread_local.current_function = f
    try:
        yield
    finally:
        _thread_local.current_function = previous
class TypeInfo(object):
    """Type information of an input/gradient array.

    Records the shape, dtype and dimensionality of a single array; this
    information is independent of whether the array lives on CPU or GPU.
    """

    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype
        self.ndim = len(shape)

    @property
    def size(self):
        """Total number of elements (product of the shape; 1 for scalars)."""
        total = 1
        for extent in self.shape:
            total *= extent
        return total
class TypeInfoTuple(tuple):
    """Type information of input/gradient tuples.

    It is a sub-class of tuple containing :class:`TypeInfo`. The i-th element
    of this object contains type information of the i-th input/gradient data.
    As each element is :class:`Expr`, you can easily check its validity.
    """

    def size(self):
        """Returns an expression representing its length.

        Returns:
            Expr: An expression object representing length of the tuple.
        """
        # ``self.name`` is attached after construction by get_types();
        # Variable is defined later in this module.
        return Variable(len(self), '{0}.size'.format(self.name))
class LightTypeInfoTuple(tuple):
    """Type information of input/gradient tuples for light-weight checks.

    Unlike :class:`TypeInfoTuple`, its elements are plain values rather than
    expression objects, so queries return concrete results immediately.
    """

    def size(self):
        """Return the number of entries in the tuple.

        Returns:
            int: Length of the tuple.
        """
        return len(self)
def get_types(data, name, accept_none):
    """Build a TypeInfoTuple describing every array in *data*."""
    assert isinstance(data, tuple)
    infos = TypeInfoTuple(
        _get_type(name, idx, arr, accept_none)
        for idx, arr in enumerate(data))
    # tuple subclasses cannot receive extra constructor arguments here, so
    # the display name is attached after construction.
    infos.name = name
    return infos
def get_light_types(data):
    """Wrap *data* in a LightTypeInfoTuple for light-weight checking."""
    assert isinstance(data, tuple)
    return LightTypeInfoTuple(data)
def _get_type(name, index, array, accept_none):
    """Wrap one array's type info in a named Variable expression."""
    var_name = '{0}[{1}]'.format(name, index)
    if array is None and accept_none:
        # A gradient may legitimately be absent; represent it with an
        # empty TypeInfo.
        return Variable(TypeInfo((), None), var_name)
    assert isinstance(array, chainer.get_array_types())
    return Variable(TypeInfo(array.shape, array.dtype), var_name)
def _make_un_operator(exp, priority, func):
    """Create a unary-operator method building a UnaryOperator node."""
    def op(term):
        return UnaryOperator(priority, term, exp, func)
    return op
def _make_bin_operator(exp, priority, func, right_associative=False):
    """Create a binary-operator method building a BinaryOperator node."""
    def op(lhs, rhs):
        return BinaryOperator(priority, lhs, rhs, exp, func,
                              right_associative)
    return op
def _make_bool_operator(exp, inv, func):
    """Create a comparison method building a BoolBinaryOperator node."""
    def op(lhs, rhs):
        return BoolBinaryOperator(lhs, rhs, exp, inv, func)
    return op
def _flip(f):
    """Return a two-argument callable that applies *f* with swapped args."""
    def flipped(x, y):
        return f(y, x)
    return flipped
class Expr(object):
    """Abstract syntax tree of an expression.

    It represents an abstract syntax tree, and isn't a value. You can get its
    actual value with :meth:`eval` function, and get syntax representation with
    the :meth:`__str__` method.
    Each comparison operator (e.g. ``==``) generates a new :class:`Expr` object
    which represents the result of comparison between two expressions.

    .. admonition:: Example

       Let ``x`` and ``y`` be instances of :class:`Expr`, then ::

          >>> x = Variable(1, 'x')
          >>> y = Variable(1, 'y')
          >>> c = (x == y)

       is also an instance of :class:`Expr`. To evaluate and get its value,
       call :meth:`eval` method::

          >>> c.eval()
          True

       Call ``str`` function to get a representation of the original
       equation::

          >>> str(c)
          'x == y'

       You can actually compare an expression with a value::

          >>> (x == 1).eval()
          True

       Note that you can't use boolean operators such as ``and``, as they try
       to cast expressions to boolean values::

          >>> z = Variable(1, 'z')
          >>> x == y and y == z  # raises an error
          Traceback (most recent call last):
          RuntimeError: Don't convert Expr to bool. Please call Expr.eval \
method to evaluate expression.

    """

    def __init__(self, priority):
        self.priority = priority

    def eval(self):
        """Evaluates the tree to get actual value.

        Behavior of this function depends on an implementation class.
        For example, a binary operator ``+`` calls the ``__add__`` function
        with the two results of :meth:`eval` function.
        """
        raise NotImplementedError()

    def __getattr__(self, name):
        return GetAttr(self, name)

    def __getitem__(self, key):
        return GetItem(self, key)

    def __call__(self, *args):
        return Call(self, args)

    def __nonzero__(self):
        # When a user calls a boolean operator like `(x == y and z == w)`,
        # `and` operator evaluate the first expression.
        # If it returns `True` (and it's default behavior), the `and` operator
        # returns *the second expression*, not a boolean value.
        # So, `(x == y and z == w)` returns the result of `z == w`, and
        # `(x == y and z == w).expect()` raise no errors but only checks
        # `z == w`. It is confusing.
        # See also:
        # https://docs.python.org/3/library/stdtypes.html
        msg = ('Don\'t convert Expr to bool. '
               'Please call Expr.eval method to evaluate expression.')
        raise RuntimeError(msg)

    def __bool__(self):
        self.__nonzero__()

    __eq__ = _make_bool_operator('==', '!=', operator.__eq__)
    __ne__ = _make_bool_operator('!=', '==', operator.__ne__)
    __lt__ = _make_bool_operator('<', '>=', operator.__lt__)
    __le__ = _make_bool_operator('<=', '>', operator.__le__)
    __gt__ = _make_bool_operator('>', '<=', operator.__gt__)
    __ge__ = _make_bool_operator('>=', '<', operator.__ge__)

    # Please refer the Python documentation to know priority of operators.
    # https://docs.python.org/3/reference/expressions.html
    __add__ = _make_bin_operator('+', 4, operator.__add__)
    __radd__ = _flip(__add__)
    __sub__ = _make_bin_operator('-', 4, operator.__sub__)
    __rsub__ = _flip(__sub__)
    __mul__ = _make_bin_operator('*', 5, operator.__mul__)
    __rmul__ = _flip(__mul__)
    if sys.version_info < (3, 0, 0):
        __div__ = _make_bin_operator('/', 5, operator.__div__)  # type: ignore # NOQA
        __rdiv__ = _flip(__div__)
    else:
        __truediv__ = _make_bin_operator('/', 5, operator.__truediv__)
        __rtruediv__ = _flip(__truediv__)
    __floordiv__ = _make_bin_operator('//', 5, operator.__floordiv__)
    __rfloordiv__ = _flip(__floordiv__)
    __mod__ = _make_bin_operator('%', 5, operator.__mod__)
    __rmod__ = _flip(__mod__)
    # Only '**' operator is right-associative
    # BUG FIX: this was previously built with operator.__mod__, so `a ** b`
    # silently computed a modulo instead of exponentiation.
    __pow__ = _make_bin_operator('**', 7, operator.__pow__,
                                 right_associative=True)
    __lshift__ = _make_bin_operator('<<', 3, operator.__lshift__)
    __rlshift__ = _flip(__lshift__)
    __rshift__ = _make_bin_operator('>>', 3, operator.__rshift__)
    __rrshift__ = _flip(__rshift__)
    __and__ = _make_bin_operator('&', 2, operator.__and__)
    __rand__ = _flip(__and__)
    __xor__ = _make_bin_operator('^', 1, operator.__xor__)
    __rxor__ = _flip(__xor__)
    __or__ = _make_bin_operator('|', 0, operator.__or__)
    __ror__ = _flip(__or__)
    __neg__ = _make_un_operator('-', 6, operator.__neg__)
    __pos__ = _make_un_operator('+', 6, operator.__pos__)
    __invert__ = _make_un_operator('~', 6, operator.__invert__)
def _eval_expr(v):
    """Recursively evaluate *v*, descending into lists and tuples."""
    if isinstance(v, Expr):
        return v.eval()
    if isinstance(v, list):
        return [_eval_expr(item) for item in v]
    if isinstance(v, tuple):
        return tuple(_eval_expr(item) for item in v)
    return v
def _repr(v):
    """Render *v* as source text, using str() for expression nodes."""
    if isinstance(v, Expr):
        return str(v)
    if isinstance(v, list):
        return '[{0}]'.format(', '.join(map(_repr, v)))
    if isinstance(v, tuple):
        if not v:
            return '()'
        if len(v) == 1:
            # Single-element tuples need the trailing comma.
            return '({0},)'.format(_repr(v[0]))
        return '({0})'.format(', '.join(map(_repr, v)))
    return repr(v)
class Atom(Expr):
    """Leaf expression node; priority 8 binds tighter than any operator."""

    def __init__(self):
        super(Atom, self).__init__(8)
class Constant(Atom):
    """Expression node wrapping a literal value."""

    def __init__(self, value):
        super(Constant, self).__init__()
        self.value = value

    def eval(self):
        return self.value

    def __str__(self):
        return _repr(self.value)
class Variable(Atom):
    """Expression node binding a value to a display name."""

    def __init__(self, value, name):
        super(Variable, self).__init__()
        self.value = value
        self.name = name

    def eval(self):
        return self.value

    def __str__(self):
        return self.name
class GetAttr(Atom):
    """Expression node for attribute access (``obj.name``)."""

    def __init__(self, obj, name):
        super(GetAttr, self).__init__()
        self.obj = obj
        self.name = name

    def __str__(self):
        # Render dotted syntax when the attribute name is a plain string
        # (possibly wrapped in a Constant); fall back to getattr() syntax.
        name = self.name
        if isinstance(name, Constant) and isinstance(name.value, str):
            name = name.value
        if isinstance(name, str):
            return '{0}.{1}'.format(_repr(self.obj), name)
        return 'getattr({0}, {1})'.format(_repr(self.obj), _repr(self.name))

    def eval(self):
        return getattr(_eval_expr(self.obj), _eval_expr(self.name))
def _str_subscript(exp):
    """Render a subscript expression (index, slice, Ellipsis or tuple)."""
    if exp is Ellipsis:
        return '...'
    if isinstance(exp, slice):
        def part(v):
            return '' if v is None else _repr(v)
        pieces = [part(exp.start), part(exp.stop)]
        if exp.step is not None:
            pieces.append(part(exp.step))
        return ':'.join(pieces)
    if isinstance(exp, tuple):
        return ', '.join(_str_subscript(item) for item in exp)
    return _repr(exp)
class GetItem(Atom):
    """Expression node for subscription (``obj[key]``)."""

    def __init__(self, obj, key):
        super(GetItem, self).__init__()
        self.obj = obj
        self.key = key

    def __str__(self):
        return '{0}[{1}]'.format(_repr(self.obj), _str_subscript(self.key))

    def eval(self):
        return _eval_expr(self.obj)[_eval_expr(self.key)]
class Call(Atom):
    """Expression node for a function call (``obj(*args)``)."""

    def __init__(self, obj, args):
        assert isinstance(args, tuple)
        super(Call, self).__init__()
        self.obj = obj
        self.args = args

    def __str__(self):
        rendered = ', '.join(map(_repr, self.args))
        return '{0}({1})'.format(_repr(self.obj), rendered)

    def eval(self):
        func = _eval_expr(self.obj)
        return func(*[_eval_expr(a) for a in self.args])
class UnaryOperator(Expr):
    """Expression node for a prefix unary operator."""

    def __init__(self, priority, term, exp, func):
        super(UnaryOperator, self).__init__(priority)
        self.term = term
        self.exp = exp
        self.func = func

    def eval(self):
        return self.func(_eval_expr(self.term))

    def __str__(self):
        operand = _repr(self.term)
        # Parenthesize operands that bind more loosely than this operator.
        if isinstance(self.term, Expr) and self.term.priority < self.priority:
            operand = '(' + operand + ')'
        return self.exp + operand
class BinaryOperator(Expr):
    """Expression node for an infix binary operator application."""

    def __init__(self, priority, lhs, rhs, exp, func, right_associative=False):
        super(BinaryOperator, self).__init__(priority)
        self.lhs = lhs    # left operand (Expr or plain value)
        self.rhs = rhs    # right operand (Expr or plain value)
        self.exp = exp    # operator symbol, e.g. '+'
        self.func = func  # callable implementing the operator
        self.right_associative = right_associative

    def eval(self):
        left = self._eval_left()
        right = self._eval_right()
        return self.func(left, right)

    def _eval_left(self):
        return _eval_expr(self.lhs)

    def _eval_right(self):
        return _eval_expr(self.rhs)

    def __str__(self):
        # When an infix operator is left-associative, we need to append parens
        # when rhs has the same priority
        # e.g. x << (y << z) != x << y << z
        left = _repr(self.lhs)
        if isinstance(self.lhs, Expr) and (
                self.priority > self.lhs.priority or
                (self.right_associative and
                 self.priority == self.lhs.priority)):
            left = '(' + left + ')'
        right = _repr(self.rhs)
        if isinstance(self.rhs, Expr) and (
                self.priority > self.rhs.priority or
                (not self.right_associative and
                 self.priority == self.rhs.priority)):
            right = '(' + right + ')'
        return '{0} {2} {1}'.format(left, right, self.exp)
class Testable(object):
    """Interface for objects whose condition can be asserted via expect()."""

    def expect(self):
        raise NotImplementedError()
class BoolBinaryOperator(BinaryOperator, | |
#!/usr/bin/env python
"""Simple parsers for the output of linux commands."""
import logging
import os
import re
from builtins import zip # pylint: disable=redefined-builtin
from future.utils import iteritems
from grr_response_core.lib import parser
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import client as rdf_client
# TODO(user): Extend this to resolve repo/publisher to its baseurl.
class YumListCmdParser(parser.CommandParser):
    """Parser for yum list output. Yields SoftwarePackage rdfvalues.

    We read the output of yum rather than rpm because it has publishers, and
    we don't use bdb because it's a world of hurt and appears to use
    different, incompatible versions across OS revisions.
    """

    output_types = ["SoftwarePackage"]
    supported_artifacts = ["RedhatYumPackagesList"]

    def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
              knowledge_base):
        """Parse the yum output."""
        _ = stderr, time_taken, args, knowledge_base  # Unused.
        self.CheckReturn(cmd, return_val)
        installed = rdf_client.SoftwarePackage.InstallState.INSTALLED
        # The first line is a column header; skip it.
        for line in stdout.splitlines()[1:]:
            name_arch, version, source = line.split()
            name, arch = name_arch.split(".")
            yield rdf_client.SoftwarePackage(
                name=name,
                publisher=source,
                version=version,
                architecture=arch,
                install_state=installed)
class YumRepolistCmdParser(parser.CommandParser):
    """Parser for yum repolist output. Yields PackageRepository.

    Parse all enabled repositories as output by yum repolist -q -v.
    """

    output_types = ["PackageRepository"]
    supported_artifacts = ["RedhatYumRepoList"]

    def _re_compile(self, search_str):
        # Matches "<search_str> : <value>" with the value captured.
        return re.compile(r"%s\s*: ([0-9a-zA-Z-\s./#_=:\(\)]*)" % (search_str))

    def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
              knowledge_base):
        """Parse the yum repolist output.

        A "Repo-id" line starts a repository record; subsequent lines are
        matched against the attribute regexes until a blank line ends it.
        """
        _ = stderr, time_taken, args, knowledge_base  # Unused.
        self.CheckReturn(cmd, return_val)
        output = iter(stdout.splitlines())
        repo_regexes = {
            "name": self._re_compile("Repo-name"),
            "revision": self._re_compile("Repo-revision"),
            "last_update": self._re_compile("Repo-updated"),
            "num_packages": self._re_compile("Repo-pkgs"),
            "size": self._re_compile("Repo-size"),
            "baseurl": self._re_compile("Repo-baseurl"),
            "timeout": self._re_compile("Repo-expire")
        }
        repo_id_re = self._re_compile("Repo-id")
        for line in output:
            match = repo_id_re.match(line)
            if match:
                repo_info = rdf_client.PackageRepository()
                setattr(repo_info, "id", match.group(1).strip())
                while line:
                    for attr, regex in iteritems(repo_regexes):
                        match = regex.match(line)
                        if match:
                            setattr(repo_info, attr, match.group(1).strip())
                            break
                    # BUG FIX: output.next() only exists on Python 2; the
                    # builtin next() works on both Python 2 and 3.
                    line = next(output)
                yield repo_info
class RpmCmdParser(parser.CommandParser):
    """Parser for rpm qa output. Yields SoftwarePackage rdfvalues."""

    output_types = ["SoftwarePackage"]
    supported_artifacts = ["RedhatPackagesList"]

    def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
              knowledge_base):
        """Parse the rpm -qa output."""
        _ = time_taken, args, knowledge_base  # Unused.
        rpm_re = re.compile(r"^(\w[-\w\+]+?)-(\d.*)$")
        self.CheckReturn(cmd, return_val)
        installed = rdf_client.SoftwarePackage.InstallState.INSTALLED
        for line in stdout.splitlines():
            match = rpm_re.match(line.strip())
            if not match:
                continue
            name, version = match.groups()
            yield rdf_client.SoftwarePackage(
                name=name, version=version, install_state=installed)
        # A corrupt rpm database is reported on stderr; surface it once as
        # an anomaly.
        for line in stderr.splitlines():
            if "error: rpmdbNextIterator: skipping h#" in line:
                yield rdf_anomaly.Anomaly(
                    type="PARSER_ANOMALY", symptom="Broken rpm database.")
                break
class DpkgCmdParser(parser.CommandParser):
    """Parser for dpkg output. Yields SoftwarePackage rdfvalues."""

    output_types = ["SoftwarePackage"]
    supported_artifacts = ["DebianPackagesList"]

    def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
              knowledge_base):
        """Parse the dpkg output."""
        _ = stderr, time_taken, args, knowledge_base  # Unused.
        self.CheckReturn(cmd, return_val)
        column_lengths = []
        i = 0
        for i, line in enumerate(stdout.splitlines()):
            if line.startswith("+++-"):
                # This is a special header line that determines column size.
                for col in line.split("-")[1:]:
                    # NOTE(review): re.match("=*", col) matches zero or more
                    # '=' and therefore always succeeds, so this ParseError
                    # is unreachable; "=+$" was probably intended -- confirm
                    # before changing, as fixing it makes malformed headers
                    # raise where they previously parsed.
                    if not re.match("=*", col):
                        raise parser.ParseError("Invalid header parsing for %s at line "
                                                "%s" % (cmd, i))
                    column_lengths.append(len(col))
                break
        if column_lengths:
            # Everything after the header line is one package per row.
            remaining_lines = stdout.splitlines()[i + 1:]
            for i, line in enumerate(remaining_lines):
                cols = line.split(None, len(column_lengths))
                # The status column is ignored in column_lengths.
                if len(column_lengths) == 4:
                    # Installed, Name, Version, Architecture, Description
                    status, name, version, arch, desc = cols
                elif len(column_lengths) == 3:
                    # Older versions of dpkg don't print Architecture
                    status, name, version, desc = cols
                    arch = None
                else:
                    raise ValueError("Bad number of columns in dpkg --list output: %s" %
                                     len(column_lengths))
                # Status is potentially 3 columns, but always at least two, desired and
                # actual state. We only care about actual state.
                if status[1] == "i":
                    status = rdf_client.SoftwarePackage.InstallState.INSTALLED
                else:
                    status = rdf_client.SoftwarePackage.InstallState.UNKNOWN
                yield rdf_client.SoftwarePackage(
                    name=name,
                    description=desc,
                    version=version,
                    architecture=arch,
                    install_state=status)
class DmidecodeCmdParser(parser.CommandParser):
    """Parser for dmidecode output. Yields HardwareInfo rdfvalues."""

    output_types = ["HardwareInfo"]
    supported_artifacts = ["LinuxHardwareInfo"]

    def _re_compile(self, search_str):
        # Matches "<search_str>: <value>" with the value captured.
        return re.compile(r"\s*%s: ([0-9a-zA-Z-\s./#_=]*)" % (search_str))

    def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
              knowledge_base):
        """Parse the dmidecode output. All data is parsed into a dictionary."""
        _ = stderr, time_taken, args, knowledge_base  # Unused.
        self.CheckReturn(cmd, return_val)
        output = iter(stdout.splitlines())

        # Compile all regexes in advance.
        sys_info_re = re.compile(r"\s*System Information")
        sys_regexes = {
            "system_manufacturer": self._re_compile("Manufacturer"),
            "serial_number": self._re_compile("Serial Number"),
            "system_product_name": self._re_compile("Product Name"),
            "system_uuid": self._re_compile("UUID"),
            "system_sku_number": self._re_compile("SKU Number"),
            "system_family": self._re_compile("Family"),
            "system_assettag": self._re_compile("Asset Tag")
        }

        bios_info_re = re.compile(r"\s*BIOS Information")
        bios_regexes = {
            "bios_vendor": self._re_compile("Vendor"),
            "bios_version": self._re_compile("Version"),
            "bios_release_date": self._re_compile("Release Date"),
            "bios_rom_size": self._re_compile("ROM Size"),
            "bios_revision": self._re_compile("BIOS Revision")
        }

        # Initialize RDF.
        dmi_info = rdf_client.HardwareInfo()

        for line in output:
            if sys_info_re.match(line):
                # Collect all System Information until we hit a blank line.
                while line:
                    for attr, regex in iteritems(sys_regexes):
                        match = regex.match(line)
                        if match:
                            setattr(dmi_info, attr, match.group(1).strip())
                            break
                    # BUG FIX: output.next() only exists on Python 2; the
                    # builtin next() works on both Python 2 and 3.
                    line = next(output)
            elif bios_info_re.match(line):
                # Collect all BIOS Information until we hit a blank line.
                while line:
                    for attr, regex in iteritems(bios_regexes):
                        match = regex.match(line)
                        if match:
                            setattr(dmi_info, attr, match.group(1).strip())
                            break
                    line = next(output)

        yield dmi_info
class PsCmdParser(parser.CommandParser):
"""Parser for '/bin/ps' output. Yields Process rdfvalues."""
output_types = ["Process"]
supported_artifacts = ["ListProcessesPsCommand"]
@classmethod
def Validate(cls, supported_artifact_objects):
"""Perform some extra sanity checks on the ps arguments."""
super(PsCmdParser, cls).Validate(supported_artifact_objects)
for artifact in supported_artifact_objects:
for source in artifact.sources:
if not cls._FindPsOutputFormat(source.attributes["cmd"],
source.attributes["args"]):
raise parser.ParserDefinitionError(
"Artifact parser %s can't process artifact %s. 'ps' command has "
"unacceptable arguments." % (cls.__name__, artifact.name))
@classmethod
def _FindPsOutputFormat(cls, cmd, args):
  """Return our best guess of the column format of the "ps" output.

  Args:
    cmd: A string with the base command that was run (e.g. "/bin/ps").
    args: A list of strings with the commandline args passed to "ps".

  Returns:
    A list of column names describing the expected output columns, or an
    empty list if the output cannot be parsed safely.
  """
  output_format = []
  for arg in args:
    # If the "ps" arg contains a comma, it's probably an output format defn.
    if "," in arg:
      output_format.extend(arg.split(","))
  if not output_format:
    # Assume a default format for the "-f" style formatting.
    output_format = [
        "user", "pid", "ppid", "pcpu", "not_implemented", "tty",
        "not_implemented", "cmd"
    ]
  # Sanity check the cmd/cmdline column if present: it consumes the rest of
  # the line, so it must appear at most once and only as the last column.
  for option in ["cmd", "command", "args"]:
    if option in output_format:
      if output_format.count(option) > 1:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning(
            "Multiple commandline outputs expected in '%s %s' "
            "output. Skipping parsing.", cmd, " ".join(args))
        return []
      if output_format[-1] != option:
        logging.warning(
            "'ps's output has the commandline not as the last "
            "column. We can't safely parse output of '%s %s'."
            "Skipping parsing.", cmd, " ".join(args))
        return []
  # If we made it here, we should be able to parse the output and we have a
  # good idea of its format.
  return output_format
def _SplitCmd(self, cmdline):
  """Break a command line into tokens on whitespace (best effort)."""
  tokens = cmdline.split()
  return tokens
def _HasHeaders(self, args):
  """Decide from the "ps" args whether a header row is expected.

  Headers are on by default; later args can toggle the setting on or off.
  """
  headers = True
  for arg in args:
    if arg in ("--headers",):
      # Explicitly turned on.
      headers = True
    elif arg in ("--no-headers", "h", "--no-heading"):
      # Explicitly turned off.
      headers = False
    elif "h" in arg and not arg.startswith("-") and "," not in arg:
      # An 'h' inside a BSD-style option group (not a '-' option and not a
      # comma-separated format definition) also disables headers.
      headers = False
  return headers
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
"""Parse the ps output.
Note that cmdline consumes every field up to the end of line
and as it is string, we can't perfectly see what the arguments
on the command line really were. We just assume a space is the arg
seperator. It's imperfect, but it's better than nothing.
Obviously, if cmd/cmdline is specified, it must be the last
column of output.
Args:
cmd: A string containing the base command that was run.
args: A list of strings containing the commandline args for the command.
stdout: A string containing the stdout of the command run.
stderr: A string containing the stderr of the command run. (Unused)
return_val: The return code following command execution.
time_taken: The time taken to run the process. (Unused)
knowledge_base: An RDF KnowledgeBase. (Unused)
Yields:
RDF Process objects.
"""
_ = stderr, time_taken, knowledge_base # Unused.
self.CheckReturn(cmd, return_val)
if not stdout:
# We have nothing to process so bug out. (Handles a input of None.)
return
rdf_convert_table = {
"pid": ("pid", int),
"tgid": ("pid", int),
"ppid": ("ppid", int),
"comm": ("name", str),
"ucomm": ("name", str),
"ruid": ("real_uid", int),
"uid": ("effective_uid", int),
"euid": ("effective_uid", int),
"suid": ("saved_uid", int),
"svuid": ("saved_uid", int),
"user": ("username", str),
"euser": ("username", str),
"uname": ("username", str),
"rgid": ("real_gid", int),
"gid": ("effective_gid", int),
"egid": | |
%s' % len(ast_node.args))
[arg] = ast_node.args
elem_type = type_declaration_ast_to_ir3_expression_type(arg, compilation_context)
return ir3.SetExpr(elem_type=elem_type, elem_exprs=[])
def int_iterable_sum_expr_ast_to_ir3(ast_node: ast.Call,
                                     compilation_context: CompilationContext,
                                     in_match_pattern: bool,
                                     check_var_reference: Callable[[ast.Name], None]):
    """Compile a sum() call over a List[int] or Set[int] into IR3."""
    if in_match_pattern:
        raise CompilationError(compilation_context, ast_node,
                               'sum() is not allowed in match patterns')
    if ast_node.keywords:
        raise CompilationError(compilation_context, ast_node.keywords[0].value, 'Keyword arguments are not supported.')
    if len(ast_node.args) != 1:
        raise CompilationError(compilation_context, ast_node, 'sum() takes 1 argument. Got: %s' % len(ast_node.args))
    arg_ast = ast_node.args[0]
    iterable_expr = expression_ast_to_ir3(arg_ast, compilation_context, in_match_pattern, check_var_reference)
    is_int_iterable = (isinstance(iterable_expr.type, (ir3.ListType, ir3.SetType))
                       and isinstance(iterable_expr.type.elem_type, ir3.IntType))
    if not is_int_iterable:
        notes = []
        if isinstance(iterable_expr, ir3.VarReference):
            definition = compilation_context.get_symbol_definition(iterable_expr.name)
            assert definition
            assert not definition.is_only_partially_defined
            notes.append((definition.ast_node, '%s was defined here' % iterable_expr.name))
        raise CompilationError(compilation_context, arg_ast,
                               'The argument of sum() must have type List[int] or Set[int]. Got type: %s' % str(iterable_expr.type),
                               notes=notes)
    if isinstance(iterable_expr.type, ir3.SetType):
        return ir3.IntSetSumExpr(set_expr=iterable_expr)
    return ir3.IntListSumExpr(list_expr=iterable_expr)
def bool_iterable_all_expr_ast_to_ir3(ast_node: ast.Call,
                                      compilation_context: CompilationContext,
                                      in_match_pattern: bool,
                                      check_var_reference: Callable[[ast.Name], None]):
    """Compile an all() call over a List[bool] or Set[bool] into IR3."""
    if in_match_pattern:
        raise CompilationError(compilation_context, ast_node,
                               'all() is not allowed in match patterns')
    if ast_node.keywords:
        raise CompilationError(compilation_context, ast_node.keywords[0].value, 'Keyword arguments are not supported.')
    if len(ast_node.args) != 1:
        raise CompilationError(compilation_context, ast_node, 'all() takes 1 argument. Got: %s' % len(ast_node.args))
    arg_ast = ast_node.args[0]
    iterable_expr = expression_ast_to_ir3(arg_ast, compilation_context, in_match_pattern, check_var_reference)
    is_bool_iterable = (isinstance(iterable_expr.type, (ir3.ListType, ir3.SetType))
                        and isinstance(iterable_expr.type.elem_type, ir3.BoolType))
    if not is_bool_iterable:
        notes = []
        if isinstance(iterable_expr, ir3.VarReference):
            definition = compilation_context.get_symbol_definition(iterable_expr.name)
            assert definition
            assert not definition.is_only_partially_defined
            notes.append((definition.ast_node, '%s was defined here' % iterable_expr.name))
        raise CompilationError(compilation_context, arg_ast,
                               'The argument of all() must have type List[bool] or Set[bool]. Got type: %s' % str(iterable_expr.type),
                               notes=notes)
    if isinstance(iterable_expr.type, ir3.SetType):
        return ir3.BoolSetAllExpr(set_expr=iterable_expr)
    return ir3.BoolListAllExpr(list_expr=iterable_expr)
def bool_iterable_any_expr_ast_to_ir3(ast_node: ast.Call,
                                      compilation_context: CompilationContext,
                                      in_match_pattern: bool,
                                      check_var_reference: Callable[[ast.Name], None]):
    """Compile an any() call over a List[bool] or Set[bool] into IR3."""
    if in_match_pattern:
        raise CompilationError(compilation_context, ast_node,
                               'any() is not allowed in match patterns')
    if ast_node.keywords:
        raise CompilationError(compilation_context, ast_node.keywords[0].value, 'Keyword arguments are not supported.')
    if len(ast_node.args) != 1:
        raise CompilationError(compilation_context, ast_node, 'any() takes 1 argument. Got: %s' % len(ast_node.args))
    arg_ast = ast_node.args[0]
    iterable_expr = expression_ast_to_ir3(arg_ast, compilation_context, in_match_pattern, check_var_reference)
    is_bool_iterable = (isinstance(iterable_expr.type, (ir3.ListType, ir3.SetType))
                        and isinstance(iterable_expr.type.elem_type, ir3.BoolType))
    if not is_bool_iterable:
        notes = []
        if isinstance(iterable_expr, ir3.VarReference):
            definition = compilation_context.get_symbol_definition(iterable_expr.name)
            assert definition
            assert not definition.is_only_partially_defined
            notes.append((definition.ast_node, '%s was defined here' % iterable_expr.name))
        raise CompilationError(compilation_context, arg_ast,
                               'The argument of any() must have type List[bool] or Set[bool]. Got type: %s' % str(iterable_expr.type),
                               notes=notes)
    if isinstance(iterable_expr.type, ir3.SetType):
        return ir3.BoolSetAnyExpr(set_expr=iterable_expr)
    return ir3.BoolListAnyExpr(list_expr=iterable_expr)
def _is_structural_equality_check_supported_for_type(type: ir3.ExprType):
    """Whether values of this type can be compared element-by-element.

    Functions and sets are not structurally comparable; lists and custom
    types are comparable iff all their element/field types are.
    """
    if isinstance(type, (ir3.BoolType, ir3.IntType, ir3.TypeType)):
        return True
    if isinstance(type, (ir3.FunctionType, ir3.SetType)):
        return False
    if isinstance(type, ir3.ListType):
        return _is_structural_equality_check_supported_for_type(type.elem_type)
    if isinstance(type, ir3.CustomType):
        return all(_is_structural_equality_check_supported_for_type(arg_type.type)
                   for arg_type in type.arg_types)
    raise NotImplementedError('Unexpected type: %s' % type.__class__.__name__)
def _is_equality_check_supported_for_type(type: ir3.ExprType):
    """Whether == / != are supported for this type.

    A set is comparable when its element type is structurally comparable;
    every other type defers directly to the structural check.
    """
    target = type.elem_type if isinstance(type, ir3.SetType) else type
    return _is_structural_equality_check_supported_for_type(target)
def eq_ast_to_ir3(lhs_node: ast.AST,
                  rhs_node: ast.AST,
                  compilation_context: CompilationContext,
                  in_match_pattern: bool,
                  check_var_reference: Callable[[ast.Name], None]):
    """Compile an `==` comparison into an ir3.EqualityComparison."""
    assert not in_match_pattern
    lhs = expression_ast_to_ir3(lhs_node, compilation_context, in_match_pattern, check_var_reference)
    rhs = expression_ast_to_ir3(rhs_node, compilation_context, in_match_pattern, check_var_reference)
    if lhs.type != rhs.type:
        raise CompilationError(
            compilation_context, lhs_node,
            'Type mismatch in ==: %s vs %s' % (str(lhs.type), str(rhs.type)))
    if not _is_equality_check_supported_for_type(lhs.type):
        raise CompilationError(
            compilation_context, lhs_node,
            'Type not supported in equality comparison: ' + str(lhs.type))
    comparison = ir3.EqualityComparison(lhs=lhs, rhs=rhs)
    return comparison
def not_eq_ast_to_ir3(lhs_node: ast.AST,
                      rhs_node: ast.AST,
                      compilation_context: CompilationContext,
                      in_match_pattern: bool,
                      check_var_reference: Callable[[ast.Name], None]):
    """Compile a `!=` comparison into a negated ir3.EqualityComparison."""
    assert not in_match_pattern
    lhs = expression_ast_to_ir3(lhs_node, compilation_context, in_match_pattern, check_var_reference)
    rhs = expression_ast_to_ir3(rhs_node, compilation_context, in_match_pattern, check_var_reference)
    if lhs.type != rhs.type:
        raise CompilationError(
            compilation_context, lhs_node,
            'Type mismatch in !=: %s vs %s' % (str(lhs.type), str(rhs.type)))
    if not _is_equality_check_supported_for_type(lhs.type):
        raise CompilationError(
            compilation_context, lhs_node,
            'Type not supported in equality comparison: ' + str(lhs.type))
    comparison = ir3.EqualityComparison(lhs=lhs, rhs=rhs)
    return ir3.NotExpr(expr=comparison)
def _construct_note_diagnostic_for_function_signature(function_lookup_result: SymbolLookupResult):
    """Build a (node, message) note pointing at a callable's definition."""
    name = function_lookup_result.symbol.name
    node = function_lookup_result.ast_node
    if isinstance(node, ast.ClassDef):
        # For a class, point at its single method: __init__.
        [init_def_node] = node.body
        return init_def_node, 'The definition of %s.__init__ was here' % name
    return node, 'The definition of %s was here' % name
def _construct_note_diagnostic_for_function_arg(function_arg_ast_node: ast.arg):
return function_arg_ast_node, 'The definition of %s was here' % function_arg_ast_node.arg
def function_call_ast_to_ir3(ast_node: ast.Call,
                             compilation_context: CompilationContext,
                             in_match_pattern: bool,
                             check_var_reference: Callable[[ast.Name], None]):
    """Compile an ast.Call node into an ir3.FunctionCall.

    Calls must use either only keyword arguments or only positional
    arguments (a mix is rejected). Arity and per-argument type mismatches
    raise CompilationError with notes pointing back at the relevant
    definitions.
    """
    # TODO: allow calls to custom types' constructors.
    if in_match_pattern:
        raise CompilationError(compilation_context, ast_node,
                               'Function calls are not allowed in match patterns')
    fun_expr = expression_ast_to_ir3(ast_node.func, compilation_context, in_match_pattern, check_var_reference)
    if not isinstance(fun_expr.type, ir3.FunctionType):
        raise CompilationError(compilation_context, ast_node,
                               'Attempting to call an object that is not a function. It has type: %s' % str(fun_expr.type))
    if ast_node.keywords and ast_node.args:
        raise CompilationError(compilation_context, ast_node, 'Function calls with a mix of keyword and non-keyword arguments are not supported. Please choose either style.')
    if ast_node.keywords:
        # Keyword-style call: we need the callee's definition in order to map
        # argument names onto positions, so only direct references to a known
        # function or type are supported.
        if not isinstance(fun_expr, ir3.VarReference):
            raise CompilationError(compilation_context, ast_node.keywords[0].value,
                                   'Keyword arguments can only be used when calling a specific function or constructing a specific type, not when calling other callable objects. Please switch to non-keyword arguments.')
        lookup_result = compilation_context.get_symbol_definition(fun_expr.name)
        assert lookup_result
        assert not lookup_result.is_only_partially_defined
        if isinstance(lookup_result.ast_node, ast.ClassDef):
            is_constructor_call = True
            # The __init__ method.
            [fun_definition_ast_node] = lookup_result.ast_node.body
        else:
            # It might still end up being a constructor call, e.g. if the custom type is assigned to a var and then used
            # as a function.
            is_constructor_call = False
            fun_definition_ast_node = lookup_result.ast_node
        if not isinstance(fun_definition_ast_node, ast.FunctionDef):
            raise CompilationError(compilation_context, ast_node.keywords[0].value,
                                   'Keyword arguments can only be used when calling a specific function or constructing a specific type, not when calling other callable objects. Please switch to non-keyword arguments.',
                                   notes=[(fun_definition_ast_node, 'The definition of %s was here' % ast_node.func.id)])
        if is_constructor_call:
            # We skip the 'self' parameter.
            fun_definition_ast_node_args = fun_definition_ast_node.args.args[1:]
        else:
            fun_definition_ast_node_args = fun_definition_ast_node.args.args
        arg_expr_by_name = {keyword_arg.arg: expression_ast_to_ir3(keyword_arg.value, compilation_context, in_match_pattern, check_var_reference)
                            for keyword_arg in ast_node.keywords}
        formal_arg_names = {arg.arg for arg in fun_definition_ast_node_args}
        # Diagnose unknown and/or missing argument names before building the
        # positional argument list.
        specified_nonexisting_args = arg_expr_by_name.keys() - formal_arg_names
        missing_args = formal_arg_names - arg_expr_by_name.keys()
        if specified_nonexisting_args and missing_args:
            raise CompilationError(compilation_context, ast_node,
                                   'Incorrect arguments in call to %s. Missing arguments: {%s}. Specified arguments that don\'t exist: {%s}' % (
                                       fun_expr.name, ', '.join(sorted(missing_args)), ', '.join(sorted(specified_nonexisting_args))),
                                   notes=[_construct_note_diagnostic_for_function_signature(lookup_result)]
                                         + [_construct_note_diagnostic_for_function_arg(arg)
                                            for arg in sorted(fun_definition_ast_node_args, key=lambda arg: arg.arg)
                                            if arg.arg in missing_args])
        elif specified_nonexisting_args:
            raise CompilationError(compilation_context, ast_node,
                                   'Incorrect arguments in call to %s. Specified arguments that don\'t exist: {%s}' % (
                                       fun_expr.name, ', '.join(sorted(specified_nonexisting_args))),
                                   notes=[_construct_note_diagnostic_for_function_signature(lookup_result)])
        elif missing_args:
            raise CompilationError(compilation_context, ast_node,
                                   'Incorrect arguments in call to %s. Missing arguments: {%s}' % (
                                       fun_expr.name, ', '.join(sorted(missing_args))),
                                   notes=[_construct_note_diagnostic_for_function_arg(arg)
                                          for arg in sorted(fun_definition_ast_node_args, key=lambda arg: arg.arg)
                                          if arg.arg in missing_args])
        # Reorder the keyword arguments into formal-parameter order.
        args = [arg_expr_by_name[arg.arg] for arg in fun_definition_ast_node_args]
        # NOTE(review): `args` is in formal-parameter order while
        # `ast_node.keywords` is in call order; if keywords were passed out of
        # order, the diagnostic below may attach to the wrong keyword node —
        # confirm intended behavior.
        for expr, keyword_arg, arg_type, arg_decl_ast_node in zip(args, ast_node.keywords, fun_expr.type.argtypes, fun_definition_ast_node_args):
            if expr.type != arg_type:
                notes = [_construct_note_diagnostic_for_function_arg(arg_decl_ast_node)]
                if isinstance(keyword_arg.value, ast.Name):
                    lookup_result = compilation_context.get_symbol_definition(keyword_arg.value.id)
                    assert not lookup_result.is_only_partially_defined
                    notes.append((lookup_result.ast_node, 'The definition of %s was here' % keyword_arg.value.id))
                raise CompilationError(compilation_context, keyword_arg.value,
                                       'Type mismatch for argument %s: expected type %s but was: %s' % (
                                           keyword_arg.arg, str(arg_type), str(expr.type)),
                                       notes=notes)
    else:
        # Positional-style call.
        ast_node_args = ast_node.args or []
        args = [expression_ast_to_ir3(arg_node, compilation_context, in_match_pattern, check_var_reference) for arg_node in ast_node_args]
        if len(args) != len(fun_expr.type.argtypes):
            # Name the callee in the error when it is a direct name reference.
            if isinstance(ast_node.func, ast.Name):
                lookup_result = compilation_context.get_symbol_definition(ast_node.func.id)
                assert lookup_result
                assert not lookup_result.is_only_partially_defined
                raise CompilationError(compilation_context, ast_node,
                                       'Argument number mismatch in function call to %s: got %s arguments, expected %s' % (
                                           ast_node.func.id, len(args), len(fun_expr.type.argtypes)),
                                       notes=[_construct_note_diagnostic_for_function_signature(lookup_result)])
            else:
                raise CompilationError(compilation_context, ast_node,
                                       'Argument number mismatch in function call: got %s arguments, expected %s' % (
                                           len(args), len(fun_expr.type.argtypes)))
        for arg_index, (expr, expr_ast_node, arg_type) in enumerate(zip(args, ast_node_args, fun_expr.type.argtypes)):
            if expr.type != arg_type:
                notes = []
                if isinstance(ast_node.func, ast.Name):
                    # Try to point the diagnostic at the formal argument's
                    # declaration when the callee is a direct name reference.
                    lookup_result = compilation_context.get_symbol_definition(ast_node.func.id)
                    assert lookup_result
                    if isinstance(lookup_result.ast_node, ast.ClassDef):
                        is_constructor_call = True
                        # The __init__ method.
                        [fun_definition_ast_node] = lookup_result.ast_node.body
                    else:
                        # It might still end up being a constructor call, e.g. if the custom type is assigned to a var and then used
                        # as a function.
                        is_constructor_call = False
                        fun_definition_ast_node = lookup_result.ast_node
                    if not isinstance(fun_definition_ast_node, ast.FunctionDef):
                        notes.append(_construct_note_diagnostic_for_function_signature(lookup_result))
                    else:
                        if is_constructor_call:
                            # We skip the 'self' parameter.
                            fun_definition_ast_node_args = fun_definition_ast_node.args.args[1:]
                        else:
                            fun_definition_ast_node_args = fun_definition_ast_node.args.args
                        notes.append(_construct_note_diagnostic_for_function_arg(fun_definition_ast_node_args[arg_index]))
                if isinstance(expr_ast_node, ast.Name):
                    lookup_result = compilation_context.get_symbol_definition(expr_ast_node.id)
                    assert lookup_result
                    notes.append((lookup_result.ast_node, 'The definition of %s was here' % expr_ast_node.id))
                raise CompilationError(compilation_context, expr_ast_node,
                                       'Type mismatch for argument %s: expected type %s but was: %s' % (
                                           arg_index, str(arg_type), str(expr.type)),
                                       notes=notes)
    # may_throw is conservatively True unless the callee is a VarReference
    # whose definition says it cannot throw.
    return ir3.FunctionCall(fun_expr=fun_expr,
                            args=args,
                            may_throw=not isinstance(fun_expr, ir3.VarReference)
                                      or fun_expr.is_function_that_may_throw)
def var_reference_ast_to_ir3(ast_node: ast.Name,
compilation_context: CompilationContext,
in_match_pattern: bool,
check_var_reference: Callable[[ast.Name], None]):
assert isinstance(ast_node.ctx, ast.Load)
lookup_result = compilation_context.get_symbol_definition(ast_node.id)
if lookup_result:
if lookup_result.is_only_partially_defined:
raise CompilationError(compilation_context, ast_node,
'Reference to a variable that may or may not have been initialized (depending on which branch was taken)',
notes=[(lookup_result.ast_node, '%s might have been initialized here' % ast_node.id)])
check_var_reference(ast_node)
return ir3.VarReference(type=lookup_result.symbol.type,
name=lookup_result.symbol.name,
is_global_function=lookup_result.symbol_table.parent is None,
is_function_that_may_throw=isinstance(lookup_result.symbol.type, ir3.FunctionType)
and lookup_result.symbol.is_function_that_may_throw)
else:
definition_ast_node = compilation_context.get_partial_function_definition(ast_node.id)
if definition_ast_node:
if compilation_context.current_function_name == ast_node.id:
raise CompilationError(compilation_context, ast_node, 'Recursive function references are only | |
# Copyright 2011 <NAME> (<EMAIL>)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.sites.models import Site
from django.db import models
from tmapi.exceptions import ModelConstraintException, \
UnsupportedOperationException
from tmapi.indices.literal_index import LiteralIndex
from tmapi.indices.scoped_index import ScopedIndex
from tmapi.indices.type_instance_index import TypeInstanceIndex
from association import Association
from construct_fields import BaseConstructFields
from identifier import Identifier
from item_identifier import ItemIdentifier
from locator import Locator
from reifiable import Reifiable
from subject_identifier import SubjectIdentifier
from subject_locator import SubjectLocator
from topic import Topic
from copy_utils import copy
class TopicMap (BaseConstructFields, Reifiable):
    """Represents a topic map item."""
    # The TopicMapSystem that owns this topic map.
    topic_map_system = models.ForeignKey('TopicMapSystem',
                                         related_name='topic_maps')
    # Storage address used to create the topic map; see get_locator().
    iri = models.CharField(max_length=512)
    title = models.CharField(max_length=128, blank=True)
    base_address = models.CharField(max_length=512, blank=True)

    class Meta:
        app_label = 'tmapi'
def __init__ (self, *args, **kwargs):
    """Initialise the topic map and its per-instance index cache."""
    super(TopicMap, self).__init__(*args, **kwargs)
    # Cache of index instances keyed by index class; see get_index().
    self._indices = {}
def create_association (self, association_type, scope=None,
                        proxy=Association):
    """Creates an `Association` in this topic map with the
    specified type and scope.

    :param association_type: the association type
    :type association_type: `Topic`
    :param scope: scope
    :type scope: list of `Topic`s
    :param proxy: Django proxy model class
    :type proxy: class
    :rtype: `Association`
    """
    if association_type is None:
        raise ModelConstraintException(self, 'The type may not be None')
    if association_type.topic_map != self:
        raise ModelConstraintException(
            self, 'The type is not from this topic map')
    association = proxy(type=association_type, topic_map=self)
    association.save()
    for theme in (scope or []):
        if theme.topic_map != self:
            raise ModelConstraintException(
                self, 'The theme is not from this topic map')
        association.scope.add(theme)
    return association
def create_empty_topic (self):
    """Returns a new `Topic` carrying no identifiers or other information.

    :rtype: `Topic`
    """
    new_topic = Topic(topic_map=self)
    new_topic.save()
    return new_topic
def create_locator (self, reference):
    """Returns a `Locator` wrapping the specified IRI reference.

    The reference is assumed to be an absolute IRI.

    :param reference: a string which uses the IRI notation
    :type reference: string
    :rtype: `Locator`
    """
    locator = Locator(reference)
    return locator
def create_topic (self, proxy=Topic):
    """Creates a new `Topic` with an automatically generated item
    identifier.

    This method never returns an existing `Topic`; a fresh one is
    created every time.

    :param proxy: Django proxy model class
    :type proxy: class
    :rtype: `Topic`
    """
    topic = proxy(topic_map=self)
    topic.save()
    # The auto-generated identifier embeds the current site's domain and
    # the topic's database id.
    domain = Site.objects.get_current().domain
    address = 'http://%s/tmapi/iid/auto/%d' % (domain, topic.id)
    item_identifier = ItemIdentifier(address=address,
                                     containing_topic_map=self)
    item_identifier.save()
    topic.item_identifiers.add(item_identifier)
    return topic
def create_topic_by_item_identifier (self, item_identifier):
    """Returns a `Topic` instance with the specified item identifier.

    This method returns either an existing `Topic` or creates a
    new `Topic` instance with the specified item identifier.

    If a topic with the specified item identifier exists in the
    topic map, that topic is returned. If a topic with a subject
    identifier equal to the specified item identifier exists, the
    specified item identifier is added to that topic and the topic
    is returned. If neither a topic with the specified item
    identifier nor with a subject identifier equal to the subject
    identifier exists, a topic with the item identifier is
    created.

    :param item_identifier: the item identifier the topic should contain
    :type item_identifier: `Locator`
    :rtype: `Topic`
    """
    if item_identifier is None:
        raise ModelConstraintException(
            self, 'The item identifier may not be None')
    reference = item_identifier.to_external_form()
    try:
        # First choice: a topic already carrying this item identifier.
        topic = self.topic_constructs.get(
            item_identifiers__address=reference)
    except Topic.DoesNotExist:
        try:
            # Second choice: a topic whose subject identifier matches.
            topic = self.topic_constructs.get(
                subject_identifiers__address=reference)
        except Topic.DoesNotExist:
            # No match at all: create a fresh topic.
            topic = Topic(topic_map=self)
            topic.save()
        # In both fallback cases the item identifier is attached to the
        # topic (found-by-subject-identifier or newly created).
        ii = ItemIdentifier(address=reference, containing_topic_map=self)
        ii.save()
        topic.item_identifiers.add(ii)
    return topic
def create_topic_by_subject_identifier (self, subject_identifier):
    """Returns a `Topic` instance with the specified subject identifier.

    This method returns either an existing `Topic` or creates a
    new `Topic` instance with the specified subject identifier.

    If a topic with the specified subject identifier exists in
    this topic map, that topic is returned. If a topic with an
    item identifier equal to the specified subject identifier
    exists, the specified subject identifier is added to that
    topic and the topic is returned. If neither a topic with the
    specified subject identifier nor with an item identifier equal
    to the subject identifier exists, a topic with the subject
    identifier is created.

    :param subject_identifier: the subject identifier the topic
      should contain
    :type subject_identifier: `Locator`
    :rtype: `Topic`
    """
    if subject_identifier is None:
        raise ModelConstraintException(
            self, 'The subject identifier may not be None')
    reference = subject_identifier.to_external_form()
    try:
        # First choice: a topic already carrying this subject identifier.
        topic = self.topic_constructs.get(
            subject_identifiers__address=reference)
    except Topic.DoesNotExist:
        try:
            # Second choice: a topic whose item identifier matches.
            topic = self.topic_constructs.get(
                item_identifiers__address=reference)
        except Topic.DoesNotExist:
            # No match at all: create a fresh topic.
            topic = Topic(topic_map=self)
            topic.save()
        # In both fallback cases the subject identifier is attached to the
        # topic (found-by-item-identifier or newly created).
        si = SubjectIdentifier(topic=topic, address=reference,
                               containing_topic_map=self)
        si.save()
        topic.subject_identifiers.add(si)
    return topic
def create_topic_by_subject_locator (self, subject_locator):
    """Returns a `Topic` with the specified subject locator, creating
    it if no existing topic carries that locator.

    :param subject_locator: the subject locator the topic should
      contain
    :type subject_locator: `Locator`
    :rtype: `Topic`
    """
    if subject_locator is None:
        raise ModelConstraintException(
            self, 'The subject locator may not be None')
    reference = subject_locator.to_external_form()
    try:
        return self.topic_constructs.get(
            subject_locators__address=reference)
    except Topic.DoesNotExist:
        topic = Topic(topic_map=self)
        topic.save()
        locator_record = SubjectLocator(topic=topic, address=reference,
                                        containing_topic_map=self)
        locator_record.save()
        topic.subject_locators.add(locator_record)
        return topic
def get_associations (self):
    """Returns every `Association` contained in this topic map.

    :rtype: `QuerySet` of `Association`s
    """
    associations = self.association_constructs.all()
    return associations
def get_construct_by_id (self, id, proxy=None):
    """Returns a `Construct` by its (system specific) identifier.

    Returns None when the identifier is unknown or not a valid
    integer, instead of letting int() raise for malformed input.

    :param id: the identifier of the construct to be returned
    :type id: string
    :param proxy: Django proxy model
    :type proxy: class
    :rtype: `Construct`, proxy object, or None
    """
    try:
        identifier = Identifier.objects.get(pk=int(id),
                                            containing_topic_map=self)
        construct = identifier.get_construct()
        if proxy is not None and construct is not None:
            construct = proxy.objects.get(pk=construct.id)
    except (Identifier.DoesNotExist, TypeError, ValueError):
        # A non-numeric id cannot match any system identifier; treat it
        # the same as an unknown identifier.
        construct = None
    return construct
def get_construct_by_item_identifier (self, item_identifier):
    """Returns a `Construct` by its item identifier, or None when no
    construct carries that identifier.

    :param item_identifier: the item identifier of the construct
      to be returned
    :type item_identifier: `Locator`
    :rtype: a construct or None
    """
    address = item_identifier.to_external_form()
    try:
        match = ItemIdentifier.objects.get(address=address,
                                           containing_topic_map=self)
    except ItemIdentifier.DoesNotExist:
        return None
    return match.get_construct()
def get_index (self, index_interface):
    """Returns the specified index, creating and caching it on first use.

    :param index_interface: the index to return
    :type index_interface: class
    :rtype: `Index`
    """
    supported_indices = (LiteralIndex, ScopedIndex, TypeInstanceIndex)
    if index_interface not in supported_indices:
        raise UnsupportedOperationException(
            'This TMAPI implementation does not support that index')
    try:
        return self._indices[index_interface]
    except KeyError:
        index = index_interface(self)
        self._indices[index_interface] = index
        return index
def get_locator (self):
    """Returns the `Locator` that was used to create the topic map.

    Note: the returned locator represents the storage address of the
    topic map and implies no further semantics.

    :rtype: `Locator`
    """
    locator = Locator(self.iri)
    return locator
def get_parent (self):
    """Returns None; a topic map has no parent construct.

    :rtype: None
    """
    return None
def get_topics (self):
    """Returns every `Topic` contained in this topic map.

    :rtype: `QuerySet` of `Topic`s
    """
    topics = self.topic_constructs.all()
    return topics
def get_topic_by_subject_identifier (self, subject_identifier):
    """Returns the topic with the given subject identifier, or None
    when no such topic exists.

    :param subject_identifier: the subject identifier of the topic
      to be returned
    :type subject_identifier: `Locator`
    :rtype: `Topic` or `None`
    """
    reference = subject_identifier.to_external_form()
    try:
        return self.topic_constructs.get(
            subject_identifiers__address=reference)
    except Topic.DoesNotExist:
        return None
def get_topic_by_subject_locator (self, subject_locator):
    """Returns the topic with the given subject locator, or None when
    no such topic exists.

    :param subject_locator: the subject locator of the topic to be
      returned
    :type subject_locator: `Locator`
    :rtype: `Topic` or `None`
    """
    reference = subject_locator.to_external_form()
    try:
        return self.topic_constructs.get(
            subject_locators__address=reference)
    except Topic.DoesNotExist:
        return None
def get_topic_map (self):
    """Returns this topic map itself.

    :rtype: `TopicMap`
    """
    return self
def merge_in (self, other):
"""Merges the topic map `other` into this topic map.
All `Topic`s and `Association`s and all of their contents in
`other` will be added to this topic map.
All information items in `other` will be merged into this
topic map as defined by the Topic Maps - Data Model (TMDM)
merging rules.
The merge process will not modify `other` in any way.
If this topic map equals `other`, no changes are made to | |
# Repository: alphagov-mirror/stagecraft
import json
from django.test import TestCase
from hamcrest import (
assert_that, equal_to, is_,
has_entry, has_item, has_key, is_not,
has_length, greater_than
)
from stagecraft.apps.users.models import User
from stagecraft.libs.authorization.tests.test_http import with_govuk_signon
from stagecraft.libs.backdrop_client import disable_backdrop_connection
from ...models import Dashboard, Module, ModuleType
from ...views.module import add_module_to_dashboard
from stagecraft.apps.dashboards.tests.factories.factories import(
DashboardFactory,
ModuleFactory,
ModuleTypeFactory)
from stagecraft.apps.datasets.tests.factories import(
DataGroupFactory,
DataTypeFactory,
DataSetFactory)
class ModuleViewsTestCase(TestCase):
@classmethod
def setUpClass(cls):
    # Shared fixtures for all tests in this class: a data set, a module
    # type, a dashboard owned by cls.user, and a dashboard with no owners.
    cls.data_group = DataGroupFactory(name='group')
    cls.data_type = DataTypeFactory(name='type')
    cls.data_set = DataSetFactory(
        data_group=cls.data_group,
        data_type=cls.data_type,
    )
    # Module type whose JSON schema requires a string 'thing' property.
    cls.module_type = ModuleTypeFactory(
        name='a-type',
        schema={
            'type': 'object',
            'properties': {
                'thing': {
                    'type': 'string',
                    'required': True
                }
            },
            '$schema': "http://json-schema.org/draft-03/schema#"
        }
    )
    cls.dashboard = DashboardFactory(
        published=True,
        title='A service',
        slug='some-slug',
    )
    # Used by tests that expect 404s for dashboards the user doesn't own.
    cls.dashboard_without_owner = DashboardFactory(
        published=True,
        title='Another service',
        slug='some-other-slug',
    )
    cls.user, _ = User.objects.get_or_create(
        email='<EMAIL>')
    cls.dashboard.owners.add(cls.user)
@classmethod
@disable_backdrop_connection
def tearDownClass(cls):
    """Remove the objects created in setUpClass.

    Previously cls.dashboard_without_owner was never deleted, leaking a
    dashboard row into subsequent test classes.
    """
    cls.data_set.delete()
    cls.data_type.delete()
    cls.data_group.delete()
    cls.module_type.delete()
    cls.dashboard.delete()
    cls.dashboard_without_owner.delete()
@with_govuk_signon(permissions=['dashboard'])
def test_get_module_by_uuid(self):
    # A module on a dashboard owned by the signed-on user should be
    # retrievable by its UUID.
    module1 = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    resp = self.client.get(
        '/module/{}'.format(module1.id),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(resp.status_code, is_(equal_to(200)))
    resp_json = json.loads(resp.content)
    # The complete expected serialization of the module.
    module_attrs = {
        u'info': [],
        u'description': u'',
        u'parent': None,
        u'title': u'title',
        u'data_set': None,
        u'query_parameters': None,
        u'modules': [],
        u'slug': u'module-1',
        u'options': {},
        u'dashboard': {
            u'id': str(module1.dashboard_id)
        },
        u'type': {
            u'id': str(module1.type_id)
        },
        u'id': str(module1.id)
    }
    assert_that(
        resp_json,
        equal_to(module_attrs))
@with_govuk_signon(permissions=['dashboard'])
def test_get_module_by_uuid_404s_when_user_not_owner_of_dashboard(self):
    """Fetching a module whose dashboard you do not own yields 404."""
    module = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard_without_owner,
        slug='module-1',
        options={},
        order=1)
    response = self.client.get(
        '/module/{}'.format(module.id),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(response.status_code, is_(equal_to(404)))
def test_module_doesnt_delete(self):
    """DELETE on /module/<id> responds 405 (method not allowed)."""
    module = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    response = self.client.delete(
        '/module/{}'.format(module.id),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token')
    assert_that(response.status_code, equal_to(405))
@with_govuk_signon(permissions=['dashboard'])
def test_list_modules_by_uuid_or_slug(self):
    """Modules can be listed via the dashboard slug or the dashboard UUID.

    BUG FIX: the return value of the second (UUID) GET was previously
    discarded — ``self.client.get(...)`` was not assigned to ``resp`` —
    so the following assertions silently re-checked the first (slug)
    response and the UUID variant was never actually tested.
    """
    module1 = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    module2 = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-2',
        options={},
        order=2)
    # List by slug.
    resp = self.client.get(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(resp.status_code, is_(equal_to(200)))
    resp_json = json.loads(resp.content)
    assert_that(len(resp_json), is_(equal_to(2)))
    assert_that(
        resp_json,
        has_item(has_entry('id', str(module1.id))))
    assert_that(
        resp_json,
        has_item(has_entry('id', str(module2.id))))
    # List by UUID (response now captured and asserted against).
    resp = self.client.get(
        '/dashboard/{}/module'.format(self.dashboard.id),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(resp.status_code, is_(equal_to(200)))
    resp_json = json.loads(resp.content)
    assert_that(len(resp_json), is_(equal_to(2)))
    assert_that(
        resp_json,
        has_item(has_entry('id', str(module1.id))))
    assert_that(
        resp_json,
        has_item(has_entry('id', str(module2.id))))
@with_govuk_signon(permissions=['dashboard'])
def test_list_modules_on_dashboard_when_not_owner_returns_404(self):
    """Listing modules of a dashboard you do not own yields 404."""
    other_dashboard = DashboardFactory(
        published=True,
        title='A service',
        slug='some-slug2',
    )
    # Modules on both dashboards; only ownership should matter.
    for slug, dashboard, order in (
            ('module-1', self.dashboard, 1),
            ('module-2', self.dashboard, 2),
            ('module-3', other_dashboard, 1)):
        ModuleFactory(
            type=self.module_type,
            dashboard=dashboard,
            slug=slug,
            options={},
            order=order)
    response = self.client.get(
        '/dashboard/{}/module'.format(other_dashboard.slug),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(response.status_code, is_(equal_to(404)))
@with_govuk_signon(permissions=['dashboard'])
def test_list_modules_on_dashboard(self):
    """Only the modules of the requested (owned) dashboard are listed."""
    other_dashboard = DashboardFactory(
        published=True,
        title='A service',
        slug='some-slug2',
    )
    other_dashboard.owners.add(self.user)
    module1 = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    module2 = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-2',
        options={},
        order=2)
    module3 = ModuleFactory(
        type=self.module_type,
        dashboard=other_dashboard,
        slug='module-3',
        options={},
        order=1)
    response = self.client.get(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(response.status_code, is_(equal_to(200)))
    listed = json.loads(response.content)
    assert_that(len(listed), is_(equal_to(2)))
    assert_that(listed, has_item(has_entry('id', str(module1.id))))
    assert_that(listed, has_item(has_entry('id', str(module2.id))))
    # module3 belongs to the other dashboard and must not leak in.
    assert_that(listed, is_not(has_item(has_entry('id', str(module3.id)))))
@with_govuk_signon(permissions=['dashboard'])
def test_list_modules(self):
    """/modules lists every module."""
    for slug, order in (('module-1', 1), ('module-2', 2)):
        ModuleFactory(
            type=self.module_type,
            dashboard=self.dashboard,
            slug=slug,
            options={},
            order=order)
    response = self.client.get(
        '/modules', HTTP_AUTHORIZATION='Bearer correct-token')
    assert_that(response.status_code, is_(equal_to(200)))
    assert_that(len(json.loads(response.content)), is_(equal_to(2)))
def test_edit_a_module_by_slug_on_a_dashboard_when_you_are_an_owner(self):
    """PUT on /module/<slug> responds 405 (method not allowed)."""
    module = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'objects': "some object",
        'order': 1,
        'modules': [],
    }
    response = self.client.put(
        '/module/{}'.format(module.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(405)))
def test_edit_a_module_by_id_when_you_are_an_owner(self):
    """PUT on /module/<id> responds 405 (method not allowed)."""
    module = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard,
        slug='module-1',
        options={},
        order=1)
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'objects': "some object",
        'order': 1,
        'modules': [],
    }
    response = self.client.put(
        '/module/{}'.format(module.id),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(405)))
def test_edit_a_module_when_not_owner(self):
    """PUT responds 405 even on a dashboard the user does not own."""
    module = ModuleFactory(
        type=self.module_type,
        dashboard=self.dashboard_without_owner,
        slug='module-1',
        options={},
        order=1)
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'objects': "some object",
        'order': 1,
        'modules': [],
    }
    response = self.client.put(
        '/module/{}'.format(module.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(405)))
def test_add_a_module_to_a_dashboard(self):
    """A valid POST creates a module and returns its representation."""
    existing_modules_count = len(Module.objects.all())
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'objects': "some object",
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(200)))
    assert_that(
        len(Module.objects.all()),
        greater_than(existing_modules_count))
    body = json.loads(response.content)
    assert_that(body, has_key('id'))
    assert_that(body, has_entry('slug', 'a-module'))
    assert_that(body, has_entry('options', {'thing': 'a value'}))
    # The returned id must resolve to a persisted module.
    stored_module = Module.objects.get(id=body['id'])
    assert_that(stored_module, is_not(None))
def test_add_a_module_with_no_type(self):
    """POSTing a module without a type_id is rejected with 400."""
    payload = {
        'slug': 'a-module',
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_with_empty_type(self):
    """POSTing a module with an empty type_id is rejected with 400.

    Note: the dashboard is addressed by UUID here, not slug.
    """
    payload = {
        'slug': 'a-module',
        'title': 'Some module',
        'type_id': '',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.id),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_to_a_non_existant_dashboard(self):
    """POSTing to an unknown dashboard UUID yields 404."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
    }
    response = self.client.post(
        '/dashboard/391213f0-336f-11e4-8c21-0800200c9a66/module',
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(404)))
def test_add_a_module_with_a_data_set_that_doesnt_exist(self):
    """Referencing an unknown data group/type is rejected with 400."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_group': 'bad-group',
        'data_type': 'bad-type',
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_with_an_empty_data_set(self):
    """Empty data_group/data_type strings are accepted (no data set)."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_group': '',
        'data_type': '',
        'title': 'Some module',
        'description': 'a description',
        'info': [],
        'options': {
            'thing': 'a value',
        },
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(200)))
def test_add_a_module_with_an_empty_data_set_and_query_parameters(self):
    """An empty data set plus empty query_parameters is accepted."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_group': '',
        'data_type': '',
        'title': 'Some module',
        'description': 'a description',
        'info': [],
        'options': {
            'thing': 'a value',
        },
        'order': 1,
        'modules': [],
        'query_parameters': {},
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(200)))
def test_add_a_module_with_a_data_set(self):
    """A module referencing the existing data set is accepted."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_type': str(self.data_type.name),
        'data_group': str(self.data_group.name),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(200)))
def test_add_a_module_with_a_data_set_and_a_query(self):
    """A module with a data set and valid query parameters is accepted."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_type': str(self.data_type.name),
        'data_group': str(self.data_group.name),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'query_parameters': {
            'sort_by': 'thing:desc',
        },
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(200)))
def test_add_a_module_with_a_query_but_no_data_set(self):
    """Query parameters without a data set are rejected with 400."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'query_parameters': {
            'collect': ['thing:invalid-collect-thing']
        },
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_to_a_dashboard_that_options_violates_schema(self):
    """Options violating the module type's schema are rejected with 400.

    The fixture schema requires a 'thing' property; an empty options
    object therefore fails validation.
    """
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
        },
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_to_a_dashboard_queryparams_violates_schema(self):
    """Invalid query parameters are rejected with 400 even with a data set."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'data_type': str(self.data_type.name),
        'data_group': str(self.data_group.name),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'query_parameters': {
            'collect': ['thing:invalid-collect-thing']
        },
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_to_a_dashboard_bad_json(self):
    """A request body that is not valid JSON is rejected with 400."""
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data='not json',
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(400)))
def test_add_a_module_fails_with_bad_content_type(self):
    """A non-JSON content type is rejected with 415."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'order': 1,
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/not-a-type')
    assert_that(response.status_code, is_(equal_to(415)))
def test_add_a_module_fails_with_invalid_slug(self):
    """Verifies that model validations are being run.

    BUG FIX: the payload dict previously contained the 'slug' key twice
    ('bad slug' then 'a module'); Python dict literals keep only the
    last value, so the first entry was dead code. The effective value
    ('a module' — invalid because it contains a space) is kept, so the
    request behaves exactly as before.
    """
    resp = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard.slug),
        data=json.dumps({
            'slug': 'a module',
            'type_id': str(self.module_type.id),
            'title': 'Some module',
            'description': 'Some text about the module',
            'info': ['foo'],
            'options': {'thing': 'a value'},
            'order': 1,
            'modules': [],
        }),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(resp.status_code, equal_to(400))
def test_add_a_module_wih_an_empty_id(self):
    """Verifies that model validations are being run.

    (Typo 'wih' kept in the method name: renaming it would change the
    test id seen by the runner.)
    """
    module_spec = {
        'id': '',
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {'thing': 'a value'},
        'order': 1,
        'modules': [],
    }
    add_module_to_dashboard(self.dashboard, module_spec)
    dashboard = Dashboard.objects.get(id=self.dashboard.id)
    assert_that(dashboard.module_set.all(), has_length(1))
def test_add_a_module_without_a_dashboard(self):
    """POSTing directly to /module/ (no dashboard) yields 404."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {
            'thing': 'a value',
        },
        'objects': "some object",
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/module/',
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer development-oauth-access-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(404)))
@with_govuk_signon(permissions=['dashboard'])
def test_add_a_module_to_a_dashboard_you_do_not_own(self):
    """POSTing to a dashboard the signed-on user does not own yields 404."""
    payload = {
        'slug': 'a-module',
        'type_id': str(self.module_type.id),
        'title': 'Some module',
        'description': 'Some text about the module',
        'info': ['foo'],
        'options': {'thing': 'a value'},
        'order': 1,
        'modules': [],
    }
    response = self.client.post(
        '/dashboard/{}/module'.format(self.dashboard_without_owner.slug),
        data=json.dumps(payload),
        HTTP_AUTHORIZATION='Bearer correct-token',
        content_type='application/json')
    assert_that(response.status_code, is_(equal_to(404)))
class ModuleTypeViewsTestCase(TestCase):
def test_list_types(self):
    """/module-type lists all module types.

    BUG FIX: ``assert_that(len(resp_json), 2)`` invoked PyHamcrest's
    (assertion, reason) overload — 2 was treated as the *reason* string
    and the call only asserted that the length was truthy. An explicit
    matcher now checks the exact count.
    """
    ModuleTypeFactory(name="foo", schema={})
    ModuleTypeFactory(name="bar", schema={})
    resp = self.client.get('/module-type')
    resp_json = json.loads(resp.content)
    assert_that(len(resp_json), is_(equal_to(2)))
    assert_that(
        resp_json,
        has_item(has_entry('name', 'foo')),
    )
    assert_that(
        resp_json,
        has_item(has_entry('name', 'bar')),
    )
def test_list_types_filter_by_name(self):
ModuleTypeFactory(name="foo", schema={})
| |
#
# Copyright (C) 2009-2010 <NAME>, <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from gamera.core import *
init_gamera()
from gamera import knn
from gamera.plugins import pagesegmentation
from gamera.classify import ShapedGroupingFunction
from gamera.plugins.image_utilities import union_images
from gamera.plugins.listutilities import median
from gamera.toolkits.ocr.classes import Textline
import unicodedata
import sys
import time
def return_char(unicode_str, extra_chars_dict={}):
    """Converts a unicode character name to a unicode symbol.

    Signature:

      ``return_char (classname, extra_chars_dict={})``

    with

    *classname*:
      A class name derived from a unicode character name.
      Example: ``latin.small.letter.a`` returns the character ``a``.

    *extra_chars_dict*
      A dictionary of additional translations of classnames to character codes.
      This is necessary when you use class names that are not unicode names.
      The character 'code' does not need to be an actual code, but can be
      any string. This can be useful, e.g. for ligatures:

      .. code:: Python

        return_char(glyph.get_main_id(), {'latin.small.ligature.st':'st'})

    When *classname* is not listed in *extra_chars_dict*, it must correspond
    to a `standard unicode character name`_, as in the examples of the
    following table:

    .. _`standard unicode character names`: http://www.unicode.org/charts/

    +-----------+----------------------------+----------------------------+
    | Character | Unicode Name               | Class Name                 |
    +===========+============================+============================+
    | ``!``     | ``EXCLAMATION MARK``       | ``exclamation.mark``       |
    +-----------+----------------------------+----------------------------+
    | ``2``     | ``DIGIT TWO``              | ``digit.two``              |
    +-----------+----------------------------+----------------------------+
    | ``A``     | ``LATIN CAPITAL LETTER A`` | ``latin.capital.letter.a`` |
    +-----------+----------------------------+----------------------------+
    | ``a``     | ``LATIN SMALL LETTER A``   | ``latin.small.letter.a``   |
    +-----------+----------------------------+----------------------------+

    Unknown names print an error and return the empty string.
    """
    # Explicit translations take precedence over the unicode name lookup.
    # BUG FIX: a membership test replaces the former bare ``except: pass``,
    # which would also have swallowed unrelated errors (e.g. TypeError).
    if unicode_str in extra_chars_dict:
        return extra_chars_dict[unicode_str]
    name = unicode_str.upper()
    # some xml-files might be corrupted due to wrong grouping
    if name.startswith('_GROUP.'):
        name = name[len('_GROUP.'):]
    if name.startswith('_PART.'):
        name = name[len('_PART.'):]
    name = name.replace(".", " ")
    try:
        return unicodedata.lookup(name)
    except KeyError:
        parts = unicode_str.split(".")
        # "collated.X" class names carry their text directly.
        if parts[0] == "collated":
            return parts[1]
        # "cursive.<name>" falls back to the plain name.
        if parts[0] == "cursive":
            return return_char(unicode_str[len("cursive."):])
        # Single-argument print works identically under Python 2 and 3
        # (BUG FIX: was a Python-2-only print statement).
        print("ERROR: Name not found: " + name)
        return ""
def chars_make_words(lines_glyphs, threshold=None):
    """Groups the given glyphs to words based upon the horizontal distance
    between adjacent glyphs.

    Signature:

      ``chars_make_words (glyphs, threshold=None)``

    with

    *glyphs*:
      A list of ``Cc`` data types, each of which representing a character.
      All glyphs must stem from the same single line of text.

    *threshold*:
      Horizontal white space greater than *threshold* will be considered
      a word separating gap. When ``None``, the threshold value is
      calculated automatically as 2.5 times the median white space
      between adjacent glyphs.

    The result is a nested list of glyphs with each sublist representing
    a word. This is the same data structure as used in `Textline.words`_

    .. _`Textline.words`: gamera.toolkits.ocr.classes.Textline.html
    """
    glyphs = lines_glyphs[:]
    wordlist = []
    # ``is None`` (not ``== None``); also drops a dead ``total_space`` local.
    if threshold is None:
        # Auto-threshold: 2.5 times the median inter-glyph gap.
        spacelist = [glyphs[i + 1].ul_x - glyphs[i].lr_x
                     for i in range(len(glyphs) - 1)]
        if spacelist:
            threshold = median(spacelist) * 2.5
        else:
            threshold = 0
    word = []
    for i, glyph in enumerate(glyphs):
        # A gap wider than the threshold starts a new word.
        if i > 0 and (glyph.ul_x - glyphs[i - 1].lr_x) > threshold:
            wordlist.append(word)
            word = []
        word.append(glyph)
    if word:
        wordlist.append(word)
    return wordlist
_AMBIGUOUS_CASE_LETTERS = ("x", "p", "o", "w", "v", "z")


def _roman_case_heuristics(glyph, line, char):
    """Disambiguate roman characters whose upper/lower case (or
    apostrophe/comma) share the same shape, using the glyph position
    relative to the text line. Reclassifies *glyph* in place and returns
    the possibly corrected character."""
    # Glyphs reaching above this y belong to the capital/ascender zone.
    cap_limit = line.bbox.center_y - (line.bbox.nrows / 4)
    lower = char.lower()
    if lower in _AMBIGUOUS_CASE_LETTERS and char in (lower, lower.upper()):
        if glyph.ul_y <= cap_limit:
            glyph.classify_heuristic("latin.capital.letter." + lower)
        else:
            glyph.classify_heuristic("latin.small.letter." + lower)
        char = return_char(glyph.get_main_id())
    if char in ("s", "S"):
        # not for long s
        if glyph.get_main_id().upper() != "LATIN.SMALL.LETTER.LONG.S":
            if glyph.ul_y <= cap_limit:
                glyph.classify_heuristic("latin.capital.letter.s")
            else:
                glyph.classify_heuristic("latin.small.letter.s")
            char = return_char(glyph.get_main_id())
    if char in ("'", ","):
        # Same shape; only the vertical position distinguishes them.
        if glyph.ul_y < line.bbox.center_y:
            glyph.classify_heuristic("APOSTROPHE")
            char = "'"
        else:
            glyph.classify_heuristic("COMMA")
            char = ","
    return char


def textline_to_string(line, heuristic_rules="roman", extra_chars_dict={}):
    """Returns a unicode string of the text in the given ``Textline``.

    Signature:

      ``textline_to_string (textline, heuristic_rules="roman", extra_chars_dict={})``

    with

    *textline*:
      A ``Textline`` object containing the glyphs. The glyphs must already
      be classified.

    *heuristic_rules*:
      Depending on the alphabet, some characters can be very similar and
      need further heuristic rules for disambiguation, like apostrophe and
      comma, which have the same shape and only differ in their position
      relative to the baseline.
      When set to \"roman\", several rules specific for latin alphabets
      are applied.

    *extra_chars_dict*
      A dictionary of additional translations of classnames to character codes.
      This is necessary when you use class names that are not unicode names.
      Will be passed to `return_char`_.

    As this function uses `return_char`_, the class names of the glyphs in
    *textline* must correspond to unicode character names, as described in
    the documentation of `return_char`_.

    .. _`return_char`: #return-char
    """
    # The six copy-pasted per-letter disambiguation blocks of the original
    # implementation are factored into _roman_case_heuristics above.
    pieces = []
    for index, word in enumerate(line.words):
        if index:
            pieces.append(" ")
        for glyph in word:
            char = return_char(glyph.get_main_id(), extra_chars_dict)
            if heuristic_rules == "roman":
                char = _roman_case_heuristics(glyph, line, char)
            pieces.append(char)
    return "".join(pieces)
def textline_to_xml(line, heuristic_rules="roman", extra_chars_dict={}):
"""Returns xml encoding of words and coordinates for the text in the given ``Textline``.
Signature:
``textline_to_xml (textline, heuristic_rules="roman", extra_chars_dict={})``
with
*textline*:
A ``Textline`` object containing the glyphs. The glyphs must already
be classified.
*heuristic_rules*:
Depending on the alphabeth, some characters can very similar and
need further heuristic rules for disambiguation, like apostroph and
comma, which have the same shape and only differ in their position
relative to the baseline.
When set to \"roman\", several rules specific for latin alphabeths
are applied.
*extra_chars_dict*
A dictionary of additional translations of classnames to character codes.
This is necessary when you use class names that are not unicode names.
Will be passed to `return_char`_.
As this function uses `return_char`_, the class names of the glyphs in
*textline* must corerspond to unicode character names, as described in
the documentation of `return_char`_.
.. _`return_char`: #return-char
"""
# This function was added by <NAME> - <EMAIL> in 9/2010
# It is based on the textline_to_string function, but modified to produce output with XML tags
# These tags add tagging of words, along with the coordinates for the upper right and lower
# left corners of the word bounding box.
# Added to support the requirements of the 18th Connect project.
#
# Modified by <NAME> - <EMAIL> on 7/24/2013
# Correcting to more accurately reflect the Gale OCR XML structure:
# chainge <line> to <p>
wordlist = line.words
s = "<p>\n"
char = ""
for i in range(len(wordlist)):
word = ""
#set left/right x and upper/lower y from first glyph
word_leftx = wordlist[i][0].ul_x
word_uppery = wordlist[i][0].ul_y
word_rightx = wordlist[i][0].lr_x
word_lowery = wordlist[i][0].lr_y
for glyph in wordlist[i]:
#update right x and (conditionally) upper/lower y from current glyph
word_rightx = glyph.lr_x
if (glyph.ul_y < word_uppery):
word_uppery = glyph.ul_y
if (glyph.lr_y > word_lowery):
word_lowery = glyph.lr_y
char = return_char(glyph.get_main_id(), extra_chars_dict)
if (heuristic_rules == "roman"):
# disambiguation of similar roman characters
if (char == "x" or char == "X"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.x")
else:
glyph.classify_heuristic("latin.small.letter.x")
char = return_char(glyph.get_main_id())
if (char == "p" or char == "P"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.p")
else:
glyph.classify_heuristic("latin.small.letter.p")
char = return_char(glyph.get_main_id())
if (char == "o" or char == "O"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.o")
else:
glyph.classify_heuristic("latin.small.letter.o")
char = return_char(glyph.get_main_id())
if (char == "w" or char == "W"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.w")
else:
glyph.classify_heuristic("latin.small.letter.w")
char = return_char(glyph.get_main_id())
if (char == "v" or char == "V"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.v")
else:
glyph.classify_heuristic("latin.small.letter.v")
char = return_char(glyph.get_main_id())
if (char == "z" or char == "Z"):
if (glyph.ul_y <= line.bbox.center_y-(line.bbox.nrows/4)):
glyph.classify_heuristic("latin.capital.letter.z")
else:
glyph.classify_heuristic("latin.small.letter.z")
char = return_char(glyph.get_main_id())
if (char == "s" or | |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import math
import random
import warnings
import numpy as np
import tensorflow as tf
try:
tf.train.AdamOptimizer
except AttributeError:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import sklearn.metrics
#TODO Clean this
# Animesh commented this line out from gcn.gcn_datasets import GCNDataset
# from gcn.gcn_datasets import GCNDataset
try:
from . import gcn_datasets
except ImportError:
import gcn_datasets
from common.trace import traceln
def init_glorot(shape, name=None):
    """Create a tf.Variable with Glorot & Bengio (AISTATS 2010) uniform init."""
    limit = np.sqrt(6.0 / (shape[0] + shape[1]))
    weights = tf.random_uniform(
        shape, minval=-limit, maxval=limit, dtype=tf.float32)
    return tf.Variable(weights, name=name)
def init_normal(shape, stddev, name=None):
    """Create a tf.Variable initialised from Normal(0, stddev)."""
    weights = tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=np.float32)
    return tf.Variable(weights, name=name)
class MultiGraphNN(object):
    '''
    Abstract Class for a Neural Net learned on a graph list.

    Subclasses are expected to provide ``train``, ``test``, ``predict``,
    ``prediction_prob`` and (for checkpointing) a ``saver`` attribute —
    none of these are defined here.
    '''

    def train_lG(self,session,gcn_graph_train):
        '''
        Train on a list of graphs (one iteration per graph)
        :param session: a TensorFlow session
        :param gcn_graph_train: list of graphs to train on
        :return:
        '''
        for g in gcn_graph_train:
            self.train(session, g, n_iter=1)

    def test_lG(self, session, gcn_graph_test, verbose=True):
        '''
        Test on a list of Graph
        :param session: a TensorFlow session
        :param gcn_graph_test: list of graphs to evaluate
        :return: (mean per-graph accuracy, node-weighted accuracy)
        '''
        acc_tp = np.float64(0.0)
        nb_node_total = np.float64(0.0)
        mean_acc_test = []
        for g in gcn_graph_test:
            acc = self.test(session, g, verbose=False)
            mean_acc_test.append(acc)
            # Weight each graph's accuracy by its node count (g.X rows).
            nb_node_total += g.X.shape[0]
            acc_tp += acc * g.X.shape[0]
        g_acc = np.mean(mean_acc_test)
        node_acc = acc_tp / nb_node_total
        if verbose:
            traceln('\t -- Mean Graph Accuracy', '%.4f' % g_acc)
            traceln('\t -- Mean Node Accuracy', '%.4f' % node_acc)
        return g_acc,node_acc

    def predict_lG(self,session,gcn_graph_predict,verbose=True):
        '''
        Predict for a list of graphs
        :param session: a TensorFlow session
        :param gcn_graph_predict: list of graphs to predict on
        :return: list of per-graph predictions
        '''
        lY_pred=[]
        for g in gcn_graph_predict:
            gY_pred = self.predict(session, g, verbose=verbose)
            lY_pred.append(gY_pred)
        return lY_pred

    def predict_prob_lG(self, session, l_gcn_graph, verbose=True):
        '''
        Predict Probabilities for a list of graphs
        :param session: a TensorFlow session
        :param l_gcn_graph: list of graphs
        :return: a list of per-graph probability predictions
        '''
        lY_pred = []
        for g in l_gcn_graph:
            gY_pred = self.prediction_prob(session, g, verbose=verbose)
            lY_pred.append(gY_pred)
        return lY_pred

    def get_nb_params(self):
        # Total number of trainable scalar parameters in the current
        # TF graph (product of each variable's shape dimensions).
        total_parameters = 0
        for variable in tf.trainable_variables():
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                # NOTE(review): ``dim.value`` is the TF 1.x Dimension API,
                # consistent with the tf.compat.v1 fallback at import time.
                variable_parameters *= dim.value
            total_parameters += variable_parameters
        return total_parameters

    def train_with_validation_set(self,session,graph_train,graph_val,max_iter,eval_iter=10,patience=7,graph_test=None,save_model_path=None):
        '''
        Implements training with a validation set
        The model is trained and accuracy is measured on a validation set
        In addition, the model can be saved and one can perform early stopping thanks to the patience argument

        :param session: a TensorFlow session
        :param graph_train: the list of graphs to train on
        :param graph_val: the list of graphs used for validation
        :param max_iter: maximum number of epochs
        :param eval_iter: evaluate every eval_iter epochs (other epochs train)
        :param patience: stop training if accuracy is not improved on the validation set after patience evaluations
        :param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
        :param save_model_path: checkpoints filename to save the model.
        :return: A Dictionary with training accuracies, validation accuracies and test accuracies if any, and the Wedge parameters
        '''
        best_val_acc=0.0
        wait=0
        stop_training=False
        stopped_iter=max_iter
        train_accuracies=[]
        validation_accuracies=[]
        test_accuracies=[]
        conf_mat=[]  # NOTE(review): never filled below; returned empty
        start_monitoring_val_acc=False  # NOTE(review): unused flag
        for i in range(max_iter):
            if stop_training:
                break
            # Every eval_iter-th epoch evaluates; the others train.
            if i % eval_iter == 0:
                traceln('\n -- Epoch ', i,' Patience ', wait)
                _, tr_acc = self.test_lG(session, graph_train, verbose=False)
                traceln(' Train Acc ', '%.4f' % tr_acc)
                train_accuracies.append(tr_acc)
                _, node_acc = self.test_lG(session, graph_val, verbose=False)
                traceln(' -- Valid Acc ', '%.4f' % node_acc)
                validation_accuracies.append(node_acc)
                if save_model_path:
                    save_path = self.saver.save(session, save_model_path, global_step=i)
                if graph_test:
                    test_graph_acc,test_acc = self.test_lG(session, graph_test, verbose=False)
                    traceln(' -- Test Acc ', '%.4f' % test_acc,' %.4f' % test_graph_acc)
                    test_accuracies.append(test_acc)
                # Early-stopping bookkeeping: reset patience on improvement,
                # otherwise count evaluations without improvement.
                if node_acc > best_val_acc:
                    best_val_acc = node_acc
                    wait = 0
                else:
                    if wait >= patience:
                        stopped_iter = i
                        stop_training = True
                    wait += 1
            else:
                # Training epoch: one pass over the shuffled training graphs.
                random.shuffle(graph_train)
                for g in graph_train:
                    self.train(session, g, n_iter=1)
        #Final Save
        traceln(' -- Stopped Model Training after : ',stopped_iter)
        traceln(' -- Validation Accuracies : ',['%.4f' % (100*sx) for sx in validation_accuracies])
        _,node_train_acc = self.test_lG(session, graph_train)
        traceln(' -- Final Training Accuracy','%.4f' % node_train_acc)
        traceln(' -- Final Valid Acc')
        self.test_lG(session, graph_val)
        R = {}
        R['train_acc'] = train_accuracies
        R['val_acc'] = validation_accuracies
        R['test_acc'] = test_accuracies
        R['stopped_iter'] = stopped_iter
        R['confusion_matrix'] = conf_mat
        #R['W_edge'] =self.get_Wedge(session)
        if graph_test:
            _, final_test_acc = self.test_lG(session, graph_test)
            traceln(' -- Final Test Acc','%.4f' % final_test_acc)
            R['final_test_acc'] = final_test_acc
            # NOTE(review): indentation reconstructed from a whitespace-mangled
            # source — the block below reads test_accuracies[epoch_index] and is
            # only safe when graph_test was provided, hence placed inside this
            # ``if``; confirm against upstream history.
            val = R['val_acc']
            traceln(' -- Validation scores', val)
            epoch_index = np.argmax(val)
            traceln(' -- Best performance on val set: Epoch', epoch_index,val[epoch_index])
            traceln(' -- Test Performance from val', test_accuracies[epoch_index])
        return R
class EnsembleGraphNN(MultiGraphNN):
    '''
    An ensemble of Graph NN models.
    The member models are constructed outside of this class; ensemble predictions
    are the average of the members' predicted class probabilities.
    '''

    def __init__(self, graph_nn_models):
        # List of already-constructed member models; each must expose
        # train_lG() and prediction_prob() with compatible output shapes.
        self.models = graph_nn_models

    def train_lG(self, session, gcn_graph_train):
        '''
        Train each member model on a list of graphs.

        :param session: the tensorflow session
        :param gcn_graph_train: the list of graphs to train on
        :return: None
        '''
        for m in self.models:
            m.train_lG(session, gcn_graph_train)

    def test_lG(self, session, gcn_graph_test, verbose=True):
        '''
        Test the ensemble on a list of graphs.

        :param session: the tensorflow session
        :param gcn_graph_test: the list of graphs to evaluate
        :param verbose: whether to print the accuracies
        :return: (graph_accuracy, node_accuracy); per-graph accuracy is not
                 implemented for the ensemble and is reported as -1
        '''
        Y_pred = self.predict_lG(session, gcn_graph_test)
        Y_true = [g.Y for g in gcn_graph_test]
        Y_pred_node = np.vstack(Y_pred)
        node_acc = sklearn.metrics.accuracy_score(np.argmax(np.vstack(Y_true), axis=1),
                                                  np.argmax(Y_pred_node, axis=1))
        g_acc = -1  # per-graph accuracy is not computed for ensembles
        if verbose:
            traceln(' -- Mean Graph Accuracy', '%.4f' % g_acc)
            traceln(' -- Mean Node Accuracy', '%.4f' % node_acc)
        return g_acc, node_acc

    def predict_lG(self, session, gcn_graph_predict, verbose=True):
        '''
        Predict for a list of graphs by averaging the members' class probabilities.

        :param session: the tensorflow session
        :param gcn_graph_predict: the list of graphs to predict for
        :param verbose: forwarded to the member models
        :return: a list of averaged probability arrays, one per graph
        '''
        lY_pred = []
        nb_models = float(len(self.models))
        for g in gcn_graph_predict:
            # Average the member probabilities for this graph.
            g_pred = [m.prediction_prob(session, g, verbose=verbose) for m in self.models]
            lY_pred.append(np.sum(g_pred, axis=0) / nb_models)
        return lY_pred

    def train_with_validation_set(self, session, graph_train, graph_val, max_iter, eval_iter=10, patience=7,
                                  graph_test=None, save_model_path=None):
        # Early stopping is not defined for the ensemble as a whole.
        raise NotImplementedError
class Logit(MultiGraphNN):
    '''
    Logistic Regression for MultiGraph.
    A single affine layer (W_classif, B_classif) with a softmax over the node
    features; the graph structure (edges) is ignored by this baseline model.
    '''
    def __init__(self,node_dim,nb_classes,learning_rate=0.1,mu=0.1,node_indim=-1):
        # node_dim: dimension of the input node feature vectors
        # nb_classes: number of target classes
        # learning_rate: step size for the Adam optimizer
        # mu: L2 regularization weight applied to W_classif
        # node_indim: internal node dimension; -1 means "use node_dim"
        self.node_dim=node_dim
        self.n_classes=nb_classes
        self.learning_rate=learning_rate
        self.activation=tf.nn.relu  # NOTE(review): not used by this model's graph
        self.mu=mu
        self.optalg = tf.train.AdamOptimizer(self.learning_rate)
        self.stack_instead_add=False
        self.train_Wn0=True
        if node_indim==-1:
            self.node_indim=self.node_dim
        else:
            self.node_indim=node_indim
    def create_model(self):
        '''
        Create the tensorflow graph for the model:
        placeholders, logits, L2-regularized cross-entropy loss, accuracy and train op.
        :return:
        '''
        self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
        self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
        self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
        # Kept for interface compatibility with the other models; no hidden layers here.
        self.Wnode_layers=[]
        self.Bnode_layers=[]
        # Classification parameters, initialized uniformly in +-1/sqrt(node_dim).
        self.W_classif = tf.Variable(tf.random_uniform((self.node_indim, self.n_classes),
                                                       -1.0 / math.sqrt(self.node_dim),
                                                       1.0 / math.sqrt(self.node_dim)),
                                     name="W_classif",dtype=np.float32)
        self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
        self.logits =tf.add(tf.matmul(self.node_input,self.W_classif),self.B_classif)
        cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
        # Global L2 regularization on the classification weights.
        self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
        self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
        self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
        self.grads_and_vars = self.optalg.compute_gradients(self.loss)
        self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
        # Add ops to save and restore all the variables.
        self.init = tf.global_variables_initializer()
        self.saver= tf.train.Saver(max_to_keep=5)
        traceln(' -- Number of Params: ', self.get_nb_params())
    def save_model(self, session, model_filename):
        # Persist all variables of the current session to model_filename.
        traceln(" -- Saving Model")
        save_path = self.saver.save(session, model_filename)
    def restore_model(self, session, model_filename):
        # Restore all variables of the model into the given session.
        self.saver.restore(session, model_filename)
        traceln(" -- Model restored.")
    def train(self,session,graph,verbose=False,n_iter=1):
        '''
        Apply a train operation, ie sgd step for a single graph
        :param session: the tensorflow session
        :param graph: a graph from GCN_Dataset (must expose X and Y arrays)
        :param verbose: if True, print the training loss after each step
        :param n_iter: (default 1) number of steps to perform sgd for this graph
        :return:
        '''
        #TrainEvalSet Here
        for i in range(n_iter):
            feed_batch = {
                self.nb_node: graph.X.shape[0],
                self.node_input: graph.X,
                self.y_input: graph.Y,
            }
            Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
            if verbose:
                traceln(' -- Training Loss',Ops[1])
    def test(self,session,graph,verbose=True):
        '''
        Test return the loss and accuracy for the graph passed as argument
        :param session: the tensorflow session
        :param graph: a graph from GCN_Dataset (must expose X and Y arrays)
        :param verbose: if True, print the loss and accuracy
        :return: the accuracy on the given graph
        '''
        feed_batch = {
            self.nb_node: graph.X.shape[0],
            self.node_input: graph.X,
            self.y_input: graph.Y,
        }
        Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
        if verbose:
            traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
        return Ops[1]
    def predict(self,session,graph,verbose=True):
        '''
        Does the prediction
        :param session: the tensorflow session
        :param graph: a graph from GCN_Dataset (must expose X)
        :param verbose: if True, print the shape of the prediction array
        :return: array of predicted class indices, one per node
        '''
        feed_batch = {
            self.nb_node: graph.X.shape[0],
            self.node_input: graph.X,
        }
        Ops = session.run([self.pred], feed_dict=feed_batch)
        if verbose:
            traceln(' -- Got Prediction for:',Ops[0].shape)
        return Ops[0]
class EdgeConvNet(MultiGraphNN):
    '''
    Edge-GCN Model for a graph list.
    '''
    # Variables ignored by set_learning_options: these are fixed at construction
    # time and must not be overridden from a configuration dictionary.
    _setter_variables={
        "node_dim":True,"edge_dim":True,"nb_class":True,
        "num_layers":True,"lr":True,"mu":True,
        "node_indim":True,"nconv_edge":True,
        "nb_iter":True,"ratio_train_val":True}
def __init__(self,node_dim,edge_dim,nb_classes,num_layers=1,learning_rate=0.1,mu=0.1,node_indim=-1,nconv_edge=1,
):
self.node_dim=node_dim
self.edge_dim=edge_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.tanh
#self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.nconv_edge=nconv_edge
self.residual_connection=False#deprecated
self.shared_We = False#deprecated
self.optim_mode=0 #deprecated
self.init_fixed=False #ignore --for test purpose
self.logit_convolve=False#ignore --for test purpose
self.train_Wn0=True #ignore --for test purpose
self.dropout_rate_edge_feat= 0.0
self.dropout_rate_edge = 0.0
self.dropout_rate_node = 0.0
self.dropout_rate_H = 0.0
self.use_conv_weighted_avg=False
self.use_edge_mlp=False
self.edge_mlp_dim = 5
self.sum_attention=False
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def set_learning_options(self,dict_model_config):
"""
Set all learning options that not directly accessible from the constructor
:param kwargs:
:return:
"""
#traceln( -- dict_model_config)
for attrname,val in dict_model_config.items():
#We treat the activation function differently as we can not pickle/serialiaze python function
if attrname=='activation_name':
if val=='relu':
self.activation=tf.nn.relu
elif val=='tanh':
self.activation=tf.nn.tanh
else:
raise Exception('Invalid Activation Function')
if attrname=='stack_instead_add' or attrname=='stack_convolutions':
self.stack_instead_add=val
if attrname not in self._setter_variables:
try:
traceln(' -- set ',attrname,val)
setattr(self,attrname,val)
except AttributeError:
warnings.warn("Ignored options for ECN"+attrname+':'+val)
def fastconvolve(self,Wedge,Bedge,F,S,T,H,nconv,Sshape,nb_edge,dropout_p_edge,dropout_p_edge_feat,
stack=True, use_dropout=False,zwe=None,use_weighted_average=False,
use_edge_mlp=False,Wedge_mlp=None,Bedge_mlp=None,use_attention=False):
'''
:param Wedge: Parameter matrix | |
car leave gaussain
YOLO(frame_resized, darknet_image, 0)
if (forward_appear[0] == 1):
forward_temp[0] = 1
else:
forward_temp[0] = 0
elif (forward_road_number == 2): #2 road
forward_road_1 = forward_foreground
forward_road_1 = forward_foreground[forward_road_coord[0, 1] - forward_gaussian_range[1]:forward_road_coord[0, 1] - forward_gaussian_range[1] + forward_road_coord[0, 3],forward_road_coord[0, 0] - forward_gaussian_range[0]:forward_road_coord[0, 0] - forward_gaussian_range[0] + forward_road_coord[0, 2]]
forward_road_2 = forward_foreground
forward_road_2 = forward_foreground[forward_road_coord[1, 1] - forward_gaussian_range[1]:forward_road_coord[1, 1] - forward_gaussian_range[1] + forward_road_coord[1, 3],forward_road_coord[1, 0] - forward_gaussian_range[0]:forward_road_coord[1, 0] - forward_gaussian_range[0] + forward_road_coord[1, 2]]
for i_1 in range(forward_road_coord[0, 3]):
for j_1 in range(forward_road_coord[0, 2]):
forward_pixel[0] = forward_road_1[i_1, j_1]
if (forward_pixel[0] > forward_pixel_threshold):
forward_sum[0] += 1
for i_2 in range(forward_road_coord[1, 3]):
for j_2 in range(forward_road_coord[1, 2]):
forward_pixel[1] = forward_road_2[i_2, j_2]
if (forward_pixel[1] > forward_pixel_threshold):
forward_sum[1] += 1
if (forward_sum[0] > forward_sum_threshold):
forward_appear[0] = 1
Sfp_time1 = datetime.datetime.now().strftime('%f')
Sp_time1 = datetime.datetime.now().strftime('%S')
else:
forward_appear[0] = 0
if (forward_sum[1] > forward_sum_threshold):
forward_appear[1] = 1
Sfp_time2 = datetime.datetime.now().strftime('%f')
Sp_time2 = datetime.datetime.now().strftime('%S')
else:
forward_appear[1] = 0
if (forward_appear[0] == 0 and forward_temp[0] == 1):
YOLO(frame_resized, darknet_image, 0)
if (forward_appear[1] == 0 and forward_temp[1] == 1):
YOLO(frame_resized, darknet_image, 1)
if (forward_appear[0] == 1):
forward_temp[0] = 1
else:
forward_temp[0] = 0
if (forward_appear[1] == 1):
forward_temp[1] = 1
else:
forward_temp[1] = 0
elif (forward_road_number == 3): #3 road
forward_road_1 = forward_foreground
forward_road_1 = forward_foreground[forward_road_coord[0, 1] - forward_gaussian_range[1]:forward_road_coord[0, 1] - forward_gaussian_range[1] + forward_road_coord[0, 3],forward_road_coord[0, 0] - forward_gaussian_range[0]:forward_road_coord[0, 0] - forward_gaussian_range[0] + forward_road_coord[0, 2]]
forward_road_2 = forward_foreground
forward_road_2 = forward_foreground[forward_road_coord[1, 1] - forward_gaussian_range[1]:forward_road_coord[1, 1] - forward_gaussian_range[1] + forward_road_coord[1, 3],forward_road_coord[1, 0] - forward_gaussian_range[0]:forward_road_coord[1, 0] - forward_gaussian_range[0] + forward_road_coord[1, 2]]
forward_road_3 = forward_foreground
forward_road_3 = forward_foreground[forward_road_coord[2, 1] - forward_gaussian_range[1]:forward_road_coord[2, 1] - forward_gaussian_range[1] + forward_road_coord[2, 3],forward_road_coord[2, 0] - forward_gaussian_range[0]:forward_road_coord[2, 0] - forward_gaussian_range[0] + forward_road_coord[2, 2]]
for i_1 in range(0, forward_road_coord[0, 3]):
for j_1 in range(0, forward_road_coord[0, 2]):
forward_pixel[0] = forward_road_1[i_1, j_1]
if (forward_pixel[0] > forward_pixel_threshold):
forward_sum[0] += 1
for i_2 in range(0, forward_road_coord[1, 3]):
for j_2 in range(0, forward_road_coord[1, 2]):
forward_pixel[1] = forward_road_2[i_2, j_2]
if (forward_pixel[1] > forward_pixel_threshold):
forward_sum[1] += 1
for i_3 in range(0, forward_road_coord[2, 3]):
for j_3 in range(0, forward_road_coord[2, 2]):
forward_pixel[2] = forward_road_3[i_3, j_3]
if (forward_pixel[2] > forward_pixel_threshold):
forward_sum[2] += 1
if (forward_sum[0] > forward_sum_threshold):
forward_appear[0] = 1
Sfp_time1 = datetime.datetime.now().strftime('%f')
Sp_time1 = datetime.datetime.now().strftime('%S')
else:
forward_appear[0] = 0
if (forward_sum[1] > forward_sum_threshold):
forward_appear[1] = 1
Sfp_time2 = datetime.datetime.now().strftime('%f')
Sp_time2 = datetime.datetime.now().strftime('%S')
else:
forward_appear[1] = 0
if (forward_sum[2] > forward_sum_threshold):
forward_appear[2] = 1
Sfp_time3 = datetime.datetime.now().strftime('%f')
Sp_time3 = datetime.datetime.now().strftime('%S')
else:
forward_appear[2] = 0
if (forward_appear[0] == 0 and forward_temp[0] == 1):
YOLO(frame_resized, darknet_image, 0)
if (forward_appear[1] == 0 and forward_temp[1] == 1):
YOLO(frame_resized, darknet_image, 1)
if (forward_appear[2] == 0 and forward_temp[2] == 1):
YOLO(frame_resized, darknet_image, 2)
if (forward_appear[0] == 1):
forward_temp[0] = 1
else:
forward_temp[0] = 0
if (forward_appear[1] == 1):
forward_temp[1] = 1
else:
forward_temp[1] = 0
if (forward_appear[2] == 1):
forward_temp[2] = 1
else:
forward_temp[2] = 0
for i in range(forward_road_number): #draw frame
frame_resized0=cv2.rectangle(frame_resized0, (count_area[i, 0],count_area[i, 1]),(count_area[i, 0]+count_area[i, 2],count_area[i,1]+count_area[i, 3]), (255, 255, 255), 1)
image = frame_resized0#detections
#
for i in range(forward_road_number + 1):
image = cv2.line(image,(int(darknet_road_line[i, 0] * darknet.network_width(netMain)), int(darknet_road_line[i, 1] * darknet.network_height(netMain))), (int(darknet_road_line[i, 2] * darknet.network_width(netMain)), int(darknet_road_line[i, 3] * darknet.network_height(netMain))), (0, 255, 255),1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
_tmp_str="L" if chk_left(0,208) else "R"
_tmp_str2="L" if chk_left(400,200) else "R"
cv2.putText(image,_tmp_str,(0,208),cv2.FONT_HERSHEY_COMPLEX,.5,(255,255,255),1)#L
cv2.putText(image,_tmp_str2,(400,200),cv2.FONT_HERSHEY_COMPLEX,.5,(255,255,255),1)#R
cv2.waitKey(3)
cv2.imshow('Demo', image)
cv2.waitKey(3)
np.savetxt(kname,Csum,delimiter=',',fmt = '%d')
cv2.destroyWindow("Demo")
cap.release()
print("yolo exist")
_Keep_Run=True
#
def YOLO(Cimg, Dimg, Count_Road):
Sfn_time = int(datetime.datetime.now().strftime('%f'))
Sn_time = int(datetime.datetime.now().strftime('%S'))
darknet.copy_image_from_bytes(Dimg,Cimg.tobytes())
#
global Sp_time, Sfp_time, Tdis, Sspeed, Bspeed, Rhold, Csum, count
if (Count_Road == 0):
Sp_time = int(Sp_time1)
Sfp_time = int(Sfp_time1)
elif (Count_Road == 1):
Sp_time = int(Sp_time2)
Sfp_time = int(Sfp_time2)
elif (Count_Road == 2):
Sp_time = int(Sp_time3)
Sfp_time = int(Sfp_time3)
if (Sn_time < Sp_time):
Sn_time += 60
Tdis = ((Sn_time - Sp_time + 0.3) * 1000000 + Sfn_time - Sfp_time) * 0.000001
detections = darknet.detect_image(netMain, metaMain, Dimg, thresh=0.25)
#print(detections)
want=[]
for i in range(len(detections)):
if(i >= len(detections)):
break
Mitem = detections[i]
trust = Mitem[1]
Ikey = i
for j in range(i + 1, len(detections)):
if(j >= len(detections)):
break
Compitem = detections[j]
CompitemX = Compitem[2][0] + Compitem[2][2] / 2
CompitemY = Compitem[2][1] + Compitem[2][3] / 2
if (CompitemX > Mitem[2][0] and CompitemX < Mitem[2][0] + Mitem[2][2] and CompitemY > Mitem[2][1] and CompitemY < Mitem[2][1] + Mitem[2][3]):
if (Compitem[1] > trust):
trust = Mitem[1]
detections.remove(detections[Ikey])
j -= 1
Ikey = j
continue
detections.remove(detections[j])
j -= 1
want.append(detections[Ikey])
LCP = 0
Lastcar = 0
Csum = []
GCenter = np.array( [ [int(Fgaussian_range[0, 0] * 416 + Fgaussian_range[0, 2] * 416 / 2),int(Fgaussian_range[0, 1] * 416 + Fgaussian_range[0, 3] * 416 / 2)],[int(Fgaussian_range[1, 0] * 416 + Fgaussian_range[1, 2] * 416 / 2),int(Fgaussian_range[1, 1] * 416 + Fgaussian_range[1, 3] * 416 / 2)],[int(Fgaussian_range[2, 0] * 416 + Fgaussian_range[2, 2] * 416 / 2),int(Fgaussian_range[2, 1] * 416 + Fgaussian_range[2, 3] * 416 / 2)] ] ) #gaussain frame center
LastXY = [0, 0]
category = [ "sedan", "truck", "scooter", "bus", "Flinkcar", "Hlinkcar"]
XYrate0 = (float(darknet_road_line[Count_Road, 2] * 416 - darknet_road_line[Count_Road, 0]*416)) / (float(darknet_road_line[Count_Road, 3] *416 - darknet_road_line[Count_Road, 1]*416))
XYrate1 = (float(darknet_road_line[Count_Road + 1, 2]*416 - darknet_road_line[Count_Road + 1, 0]*416)) / (float(darknet_road_line[Count_Road + 1, 3]*416 - darknet_road_line[Count_Road + 1, 1]*416))
global count, count_clear, Speed, Vsc, Vbc, Svs, Svb, l_Minsp, l_Maxsp, l_Ct,frame_resized
if (count_clear == 1):
Vsc = [0, 0, 0]
Vbc = [0, 0, 0]
Bspeed = [0, 0, 0]
Sspeed = [0, 0, 0]
Rhold = [0, 0, 0]
for i in range(3):
for j in range(6):
count[i, j] = 0;
count_clear = 0
for detection in want: #counting
x, y, w, h = detection[2][0],\
detection[2][1],\
detection[2][2],\
detection[2][3]
xmin, ymin, xmax, ymax = convertBack(float(x), float(y), float(w), float(h))
for i in range(len(category)):
if (ExistM == 1):
if (Count_Road == forward_road_number - 1 and detection[0] != category[2]):
continue
elif (Count_Road < forward_road_number - 1 and detection[0] == category[2]):
continue
if (detection[0] == category[i]):
RLine = int(XYrate0 * (ymax - (darknet_road_line[Count_Road, 1] * 416)) + (darknet_road_line[Count_Road, 0] * 416))
LLine = int(XYrate1 * (ymax - (darknet_road_line[Count_Road + 1, 1] * 416)) + (darknet_road_line[Count_Road + 1, 0] * 416))
print('xmax '+str(xmax)+' xmix '+str(xmin)+' Rline '+str(RLine*1.1)+' Lline '+str(LLine/1.3))
if (xmin - (xmax-xmin)/2 < RLine *1.108 and xmin + (xmax-xmin) > LLine ): #detect frame cneter between right line and left line
Rhold[Count_Road] += (((Sn_time - Sp_time) * 1000000 + Sfn_time - Sfp_time) * 0.000001) * 10
if (LCP == 0):
count[Count_Road, i] += 1
LastXY[0] = GCenter[Count_Road, 0] - (x + w / 2)
LastXY[1] = GCenter[Count_Road, 1] - (y + h / 2)
Lastcar = i
Speed = (1.3 / (Fgaussian_range[Count_Road, 3] * 416) * 3.6 * h) / Tdis
if (Speed > l_Minsp and Speed < l_Maxsp):
if (detection[0] == category[0]):
Vsc[Count_Road] += 1
Sspeed[Count_Road] += Speed
Svs[Count_Road] = int(Sspeed[Count_Road] / Vsc[Count_Road])
elif (detection[0] == category[1] or detection[0] == category[3] or detection[0] == category[4] or detection[0] == category[5]):
Vbc[Count_Road] += 1
Bspeed[Count_Road] += Speed
Svb[Count_Road] = int(Bspeed[Count_Road] / Vbc[Count_Road])
else:
if (pow(pow(GCenter[Count_Road, 1] - (y + h / 2), 2) + pow(GCenter[Count_Road, 0] - (x + w / 2), 2), 0.5) < pow(pow(LastXY[0], 2) + pow(LastXY[1], 2), 0.5) and (y + h) > GCenter[Count_Road, 1]):
Speed = (1.3 / (Fgaussian_range[Count_Road, 3] * 416) * 3.6 * h) / Tdis
if (Speed > l_Minsp and Speed < l_Maxsp):
if (detection[0] == category[0]):
Sspeed[Count_Road] += Speed
Vsc[Count_Road] += 1
Svs[Count_Road] = int(Sspeed[Count_Road] / Vsc[Count_Road])
elif (detection[0] == category[1] or detection[0] == category[3] or detection[0] == category[4] or detection[0] == category[5]):
Vbc[Count_Road] += 1
Bspeed[Count_Road] += Speed
Svb[Count_Road] = int(Bspeed[Count_Road] / Vbc[Count_Road])
count[Count_Road, Lastcar] -= 1
count[Count_Road, i] += 1
LastXY[0] = GCenter[Count_Road, 0] - (x + w / 2)
LastXY[1] = GCenter[Count_Road, 1] - (y + h / 2)
Lastcar = i
LCP += 1
break
Csum = [0, 0, 0, 0, 0, 0]
for i in range(3):
for j in range(6):
Csum[j] += count[i, j]
for i in range(3):
print("SedanRoad" + str(i) + " sedan: " + str(count[i, 0]) + " truck: " + str(count[i, 1]) + " scooter: " + str(count[i, 2]) | |
#!/usr/bin/env python3
# encoding: utf-8
"""
Determine scaling factors for a given list of levels of theory
Based on DOI: 10.1016/j.cpc.2016.09.004
Adapted by <NAME> and <NAME>
"""
import os
import time
import shutil
from arc.common import get_logger, check_ess_settings, time_lapse, initialize_log, initialize_job_types
from arc.exceptions import InputError
from arc.parser import parse_zpe
from arc.scheduler import Scheduler
from arc.settings import arc_path
from arc.species.species import ARCSpecies
try:
from arc.settings import global_ess_settings
except ImportError:
global_ess_settings = None
logger = get_logger()
HEADER = 'FREQ: A PROGRAM FOR OPTIMIZING SCALE FACTORS (Version 1)\n'\
' written by \n'\
'<NAME>, <NAME>, <NAME>, and <NAME>\n'\
'Department of Chemistry and Supercomputing Institute\n'\
'University of Minnesota, Minnesota 55455-0431\n'\
'CITATIONS:\n'\
'1. <NAME>, <NAME>, <NAME>, <NAME>, J. Chem. Theory Comput. 2010, 6, 9, 2872-2887,\n'\
' DOI: 10.1021/ct100326h\n'\
'2. <NAME>, <NAME>, <NAME>,, <NAME>, Computer Physics Communications 2017, 210, 132-138,\n'\
' DOI: 10.1016/j.cpc.2016.09.004\n\n'
def determine_scaling_factors(levels_of_theory, ess_settings=None, init_log=True):
    """
    Determine the zero-point energy, harmonic frequencies, and fundamental frequencies scaling factors
    for a given frequencies level of theory.

    Args:
        levels_of_theory (list, str): A list of frequencies levels of theory
                                      for which scaling factors are determined.
                                      A string can also be passed for just one level of theory.
        ess_settings (dict, optional): A dictionary of available ESS (keys) and a corresponding server list (values).
        init_log (bool, optional): Whether to initialize the logger. True to initialize.
                                   Should be True when called as a stand alone, and False when called within ARC.

    Returns:
        list: The harmonic frequencies scaling factors (lambda ZPE * 1.014), one entry per level of theory.

    Raises:
        InputError: If levels_of_theory is of a wrong type, or contains a '//' (opt//freq) specification.
    """
    if init_log:
        initialize_log(log_file='scaling_factor.log', project='Scaling Factors')
    if isinstance(levels_of_theory, str):
        levels_of_theory = [levels_of_theory]
    if not isinstance(levels_of_theory, list):
        raise InputError('levels_of_theory must be a list (or a string if only one level is desired). Got: {0}'.format(
            type(levels_of_theory)))
    t0 = time.time()
    logger.info('\n\n\n')
    logger.info(HEADER)
    logger.info('\n\nstarting ARC...\n')
    # only run opt (fine) and freq
    job_types = initialize_job_types(dict())  # get the defaults, so no job type is missing
    job_types = {job_type: False for job_type in job_types.keys()}
    job_types['opt'], job_types['fine'], job_types['freq'] = True, True, True
    lambda_zpes, zpe_dicts, times = list(), list(), list()
    for level_of_theory in levels_of_theory:
        t1 = time.time()
        logger.info('\nComputing scaling factors at the {0} level of theory...\n\n'.format(level_of_theory))
        renamed_level = rename_level(level_of_theory)
        project = 'scaling_' + renamed_level
        project_directory = os.path.join(arc_path, 'Projects', 'scaling_factors', project)
        # Start from a clean project directory for this level of theory.
        if os.path.isdir(project_directory):
            shutil.rmtree(project_directory)
        species_list = get_species_list()
        if '//' in level_of_theory:
            raise InputError('Level of theory should either be a composite method or in a method/basis-set format. '
                             'Got {0}'.format(level_of_theory))
        if '/' not in level_of_theory:  # assume this is a composite method
            freq_level = ''
            composite_method = level_of_theory.lower()
            job_types['freq'] = False
        else:
            freq_level = level_of_theory.lower()
            composite_method = ''
            job_types['freq'] = True
        ess_settings = check_ess_settings(ess_settings or global_ess_settings)
        # Run opt (fine) + freq for the whole standard species set at this level of theory.
        Scheduler(project=project, project_directory=project_directory, species_list=species_list,
                  composite_method=composite_method, opt_level=freq_level, freq_level=freq_level,
                  ess_settings=ess_settings, job_types=job_types, allow_nonisomorphic_2d=True)
        zpe_dict = dict()
        for spc in species_list:
            zpe_dict[spc.label] = parse_zpe(os.path.join(project_directory, 'output', 'Species', spc.label,
                                                         'geometry', 'freq.out')) * 1000  # convert to J/mol
        zpe_dicts.append(zpe_dict)
        lambda_zpes.append(calculate_truhlar_scaling_factors(zpe_dict, level_of_theory))
        times.append(time_lapse(t1))
    summarize_results(lambda_zpes, levels_of_theory, zpe_dicts, times, time_lapse(t0))
    logger.info('\n\n\n')
    logger.info(HEADER)
    # Empirical conversion from the ZPE scaling factor, see DOI 10.1016/j.cpc.2016.09.004.
    harmonic_freq_scaling_factors = [lambda_zpe * 1.014 for lambda_zpe in lambda_zpes]
    return harmonic_freq_scaling_factors
def calculate_truhlar_scaling_factors(zpe_dict, level_of_theory):
    """
    Calculate the scaling factors using Truhlar's method:

    FREQ: A PROGRAM FOR OPTIMIZING SCALE FACTORS (Version 1)
    written by <NAME>, <NAME>, <NAME>, and <NAME>
    Department of Chemistry and Supercomputing Institute
    University of Minnesota, Minnesota 55455-0431

    Citations:
        1. <NAME>, <NAME>, <NAME>, <NAME>, J. Chem. Theory Comput. 2010, 6, 9, 2872-2887
           DOI: 10.1021/ct100326h
        2. <NAME>, <NAME>, <NAME>, <NAME>, Computer Physics Communications 2017, 210, 132-138
           DOI: 10.1016/j.cpc.2016.09.004

    Args:
        zpe_dict (dict): The calculated vibrational zero-point energies at the requested level of theory.
                         Keys are species labels, values are floats representing the ZPE in J/mol
                         (or None for unconverged species).
        level_of_theory (str): The frequencies level of theory.

    Returns:
        float: The scale factor for the vibrational zero-point energy (lambda ZPE) as defined in reference [2].

    Raises:
        InputError: If no species converged, so that no scaling factor can be computed.
    """
    unconverged = [key for key, val in zpe_dict.items() if val is None]
    if unconverged:
        logger.info('\n\nWarning: Not all species in the standard set have converged at the {0} level of theory!\n'
                    'Unconverged species: {1}\n\n'.format(level_of_theory, unconverged))
    else:
        logger.info('\n\nAll species in the standard set have converged at the {0} level of theory\n\n\n'.format(
            level_of_theory))
    # Experimental ZPE values converted from kcal/mol to J/mol, as reported in reference [2]:
    exp_zpe_dict = {'C2H2': 16.490 * 4184,
                    'CH4': 27.710 * 4184,
                    'CO2': 7.3 * 4184,
                    'CO': 3.0929144 * 4184,
                    'F2': 1.302 * 4184,
                    'CH2O': 16.1 * 4184,
                    'H2O': 13.26 * 4184,
                    'H2': 6.231 * 4184,
                    'HCN': 10.000 * 4184,
                    'HF': 5.864 * 4184,
                    'N2O': 6.770 * 4184,
                    'N2': 3.3618 * 4184,
                    'NH3': 21.200 * 4184,
                    'OH': 5.2915 * 4184,
                    'Cl2': 0.7983 * 4184}
    numerator, denominator = 0.0, 0.0  # numerator and denominator in eq. 5 of reference [2]
    for label, zpe in zpe_dict.items():
        if zpe is None:
            # Unconverged species contribute nothing to either sum.
            logger.error('ZPE of species {0} could not be determined!'.format(label))
            continue
        numerator += zpe * exp_zpe_dict[label]
        denominator += zpe ** 2.0
    if not denominator:
        # Bug fix: the original raised an uninformative ZeroDivisionError here.
        raise InputError('Could not compute a scaling factor at the {0} level of theory: '
                         'no species converged.'.format(level_of_theory))
    lambda_zpe = numerator / denominator  # lambda_zpe on the left side of eq. 5 of [2]
    return lambda_zpe
def summarize_results(lambda_zpes, levels_of_theory, zpe_dicts, times, overall_time, base_path=None):
    """
    Print and save the results to file.

    Args:
        lambda_zpes (list): The scale factors for the vibrational zero-point energy, entries are floats.
        levels_of_theory (list): The frequencies levels of theory.
        zpe_dicts (list): Entries are the calculated vibrational zero-point energies at the requested level of theory.
                          Keys are species labels, values are floats representing the ZPE in J/mol (None if unconverged).
        times (list): Entries are string-representations of the calculation execution times.
        overall_time (str): A string-representation of the overall calculation execution time.
        base_path (str, optional): The path to the scaling factors base folder.
    """
    base_path = base_path or os.path.join(arc_path, 'Projects', 'scaling_factors')
    if not os.path.exists(base_path):
        os.makedirs(base_path)
    # Find the first free scaling_factors_<i>.info file name, never overwriting older reports.
    i = 0
    while os.path.isfile(os.path.join(base_path, 'scaling_factors_' + str(i) + '.info')):
        i += 1
    info_file_path = os.path.join(base_path, 'scaling_factors_' + str(i) + '.info')
    with open(info_file_path, 'w') as f:
        f.write(HEADER)
        arkane_text = '\n\n\nYou may copy-paste the following harmonic frequencies scaling factor/s to Arkane\n' \
                      '(paste in the `freq_dict` under assign_frequency_scale_factor() in arkane/statmech.py):\n'
        arkane_formats = list()
        # Note: the original code also accumulated a `harmonic_freq_scaling_factors` list here,
        # but filled it with the *fundamental* factors and never used it; that dead (and buggy)
        # accumulator was removed.
        for lambda_zpe, level_of_theory, zpe_dict, execution_time \
                in zip(lambda_zpes, levels_of_theory, zpe_dicts, times):
            # Empirical conversions from lambda ZPE, see DOI 10.1016/j.cpc.2016.09.004.
            harmonic_freq_scaling_factor = lambda_zpe * 1.014
            fundamental_freq_scaling_factor = lambda_zpe * 0.974
            unconverged = [key for key, val in zpe_dict.items() if val is None]
            text = '\n\nLevel of theory: {0}\n'.format(level_of_theory)
            if unconverged:
                text += 'The following species from the standard set did not converge at this level:\n {0}\n'.format(
                    unconverged)
            text += 'Scale Factor for Zero-Point Energies = {0:.3f}\n'.format(lambda_zpe)
            text += 'Scale Factor for Harmonic Frequencies = {0:.3f}\n'.format(harmonic_freq_scaling_factor)
            text += 'Scale Factor for Fundamental Frequencies = {0:.3f}\n'.format(fundamental_freq_scaling_factor)
            text += '(execution time: {0})\n'.format(execution_time)
            logger.info(text)
            f.write(text)
            arkane_formats.append(" '{0}': {1:.3f}, # [4]\n".format(level_of_theory,
                                                                    harmonic_freq_scaling_factor))
        logger.info(arkane_text)
        f.write(arkane_text)
        for arkane_format in arkane_formats:
            logger.info(arkane_format)
            f.write(arkane_format)
        overall_time_text = '\n\nScaling factors calculation for {0} levels of theory completed' \
                            ' (elapsed time: {1}).\n'.format(len(levels_of_theory), overall_time)
        logger.info(overall_time_text)
        f.write(overall_time_text)
def get_species_list():
"""
Generates the standardized species list.
Returns:
list: The standardized species list initialized with xyz.
"""
c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),
'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}
ch4_xyz = {'symbols': ('C', 'H', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),
'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),
(-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}
co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),
'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}
co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}
f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}
ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),
'coords': ((0.0, 0.0, 0.674622), (0.0, 0.0, -0.529707),
(0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}
h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),
'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}
h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}
hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),
'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}
hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}
n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),
'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), | |
<reponame>SereneQI/text-image-similarity
import json
import os
import re
import io
import argparse
import multiprocessing
import numpy as np
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
from nltk.tokenize import word_tokenize
import fastText
from torchvision import transforms
from utils.config import path
from utils.utils import encode_sentence, _load_dictionary, encode_sentence_fasttext, fr_preprocess, collate_fn_padded, bpe_encode
from models.model import joint_embedding
from torch.utils.data import DataLoader
from bpemb import BPEmb
#from pycocotools import mask as maskUtils
#from pycocotools.coco import COCO
def _load_vec(emb_path):
vectors = []
word2id = {}
with io.open(emb_path, 'r', encoding='utf-8', newline='\n', errors='ignore') as f:
next(f)
for i, line in enumerate(f):
word, vect = line.rstrip().split(' ', 1)
vect = np.fromstring(vect, sep=' ')
assert word not in word2id, 'word found twice'
vectors.append(vect)
word2id[word] = len(word2id)
id2word = {v: k for k, v in word2id.items()}
embeddings = np.vstack(vectors)
return embeddings, id2word, word2id
class CocoCaptionsRV(data.Dataset):
    """MS-COCO captions dataset using the Karpathy "restval" split.

    Every image carries 5 captions, so the dataset length is five times the
    number of images; item ``index`` maps to image ``index // 5`` and
    caption ``index % 5``.
    """

    def __init__(self, root=path["COCO_ROOT"], coco_json_file_path=path["COCO_RESTVAL_SPLIT"], sset="train", transform=None, embed_type='bin', embed_size=300):
        self.root = os.path.join(root, "images/")
        self.transform = transform
        # dataset.json comes from the Karpathy NeuralTalk repository and
        # contains the restval split of COCO.
        with open(coco_json_file_path, 'r') as f:
            annotations = json.load(f)
        split_filters = {
            "train": ("train",),
            "trainrv": ("train", "restval"),
            "val": ("val",),
        }
        wanted = split_filters.get(sset, ("test",))
        images = [x for x in annotations["images"] if x["split"] in wanted]
        self.content = [(os.path.join(y["filepath"], y["filename"]),
                         [s["raw"] for s in y["sentences"]]) for y in images]
        self.word2id = None
        self.bpe = False
        if embed_type == 'bin':
            self.embed = fastText.load_model('/data/m.portaz/wiki.en.bin')
        elif embed_type == "multi":
            self.embed, self.id2word, self.word2id = _load_vec('/data/m.portaz/wiki.multi.en.vec')
        elif embed_type == "subword":
            print("Loading subword model")
            self.embed = BPEmb(lang="en", dim=embed_size)
            self.bpe = True
        elif embed_type.endswith('vec'):
            # Any other ".vec" path: load a plain embedding matrix.
            self.embed, self.id2word, self.word2id = _load_vec(embed_type)
        else:
            # Otherwise treat the string as a fastText binary model path.
            self.embed = fastText.load_model(embed_type)

    def __getitem__(self, index, raw=False):
        img_idx = int(index / 5)   # which image
        cap_idx = index % 5        # which of its five captions
        img_path, captions = self.content[img_idx]
        target = captions[cap_idx]
        if raw:
            return img_path, target
        img = Image.open(os.path.join(self.root, img_path)).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        if self.word2id is None:
            target = bpe_encode(target, self.embed) if self.bpe else encode_sentence_fasttext(target, self.embed)
        else:
            target = encode_sentence(target, self.embed, self.word2id)
        return img, target

    def __len__(self):
        return 5 * len(self.content)
class Shopping(data.Dataset):
    """Image/caption dataset read from a tab-separated caption file.

    Each line of ``captionFile`` is ``<image_name>\\t<caption>``; only
    captions with 1 to 20 tokens are kept. The last 5% of the kept entries
    form the validation split, the rest the training split.
    """

    def __init__(self, args, root_dir, captionFile, transform, sset="train"):
        self.transform = transform
        self.imList = []
        self.capList = []
        # FIX: use a context manager so the caption file handle is always
        # closed (the original opened it and never closed it).
        with open(captionFile) as f:
            for line in f:
                im, cap = line.rstrip().split('\t')
                tokens = cap.split(' ')
                if 1 <= len(tokens) <= 20:
                    self.imList.append(os.path.join(root_dir, im + '.jpg'))
                    self.capList.append(tokens)
        # Last 5% of the entries are held out for validation.
        separation = len(self.imList) - (len(self.imList) // 20)
        if sset == "train":
            self.imList = self.imList[:separation]
            self.capList = self.capList[:separation]
        elif sset == "val":
            self.imList = self.imList[separation:]
            self.capList = self.capList[separation:]
        self.embed = fastText.load_model(args.dict)

    def __getitem__(self, index, raw=False):
        img_path = self.imList[int(index)]
        target = self.capList[int(index)]
        img = Image.open(img_path).convert('RGB')
        img = self.transform(img)
        # Captions are already tokenised lists, hence the final False flag.
        target = encode_sentence_fasttext(target, self.embed, False)
        return img, target

    def __len__(self):
        return len(self.imList)
class ImageDataset(data.Dataset):
    """Dataset over a plain-text list of image file names.

    ``filename`` contains one image name per line; each item is the
    transformed image together with its integer index.
    """

    def __init__(self, filename, image_dir, transform):
        with open(filename) as name_file:
            names = name_file.read().splitlines()
        self.imList = [os.path.join(image_dir, name.rstrip()) for name in names]
        self.transform = transform

    def __len__(self):
        return len(self.imList)

    def __getitem__(self, index):
        image = self.transform(Image.open(self.imList[index]))
        return image, index
class CaptionDataset(data.Dataset):
    """Dataset of encoded sentences read from a text file (one per line).

    ``dictionary`` is either a ``.vec`` embedding file (loaded with
    ``_load_vec``) or the path of a fastText binary model.
    """

    def __init__(self, filename, dictionary):
        self.fastText = not dictionary.endswith('vec')
        if self.fastText:
            self.embed = fastText.load_model(dictionary)
        else:
            self.embed = _load_vec(dictionary)
        self.sentences = [(line.rstrip(), i) for i, line in enumerate(open(filename))]

    def __len__(self):
        return len(self.sentences)

    def __getitem__(self, index):
        sentence, sentence_id = self.sentences[index]
        if self.fastText:
            return encode_sentence_fasttext(sentence, self.embed), sentence_id
        # _load_vec returns (embeddings, id2word, word2id).
        embeddings, _, word2id = self.embed
        return encode_sentence(sentence, embeddings, word2id, tokenize=False), sentence_id
class MultiLingualDataset(data.Dataset):
    """Image/caption dataset covering several languages at once.

    Captions of all languages are concatenated: indices
    ``[0, len(captions[lang0]))`` address the first language, the next
    range the second one, and so on. Each language has its own embedding
    loaded from ``dictDict[lang]``.
    """

    def __init__(self, filename, image_dir, captionsFileList, dictDict, transform, eval_mode=False):
        self.transform = transform
        self.rootDir = image_dir
        self.embeddings = {}   # lang -> (embeddings, id2word, word2id)
        self.captions = {}     # lang -> list of (caption_text, line_number)
        self.eval_mode = eval_mode
        for captionFile, lang in captionsFileList:
            if lang in dictDict:
                with open(captionFile) as fcap:
                    self.embeddings[lang] = _load_vec(dictDict[lang])
                    self.captions[lang] = [(line.rstrip(), i) for i, line in enumerate(fcap)]
        self.imList = [os.path.join(image_dir, imName.rstrip()) for imName in open(filename).read().splitlines()]

    def __len__(self):
        return np.sum([len(self.captions[lang]) for lang in self.captions])

    def getImage(self, index):
        image = Image.open(self.imList[index])
        image = self.transform(image)
        return image

    def getCaption(self, lang, index):
        # FIX: self.captions[lang][index] is a (text, line_number) tuple;
        # only the text must be encoded, not the tuple itself.
        text = self.captions[lang][index][0]
        return encode_sentence(text, self.embeddings[lang][0], self.embeddings[lang][2], tokenize=False)

    def __getitem__(self, index):
        baseIndex = 0
        for lang in self.captions:
            if index < baseIndex + len(self.captions[lang]):
                currentIndex = index - baseIndex
                image = Image.open(self.imList[currentIndex])
                image = self.transform(image)
                # FIX: the original referenced an undefined name `caption`
                # (NameError) and never left the loop, so subsequent
                # languages clobbered the result. Encode this language's
                # caption text and return immediately.
                text = self.captions[lang][currentIndex][0]
                cap = encode_sentence(text, self.embeddings[lang][0], self.embeddings[lang][2], tokenize=False)
                return image, cap
            baseIndex += len(self.captions[lang])
        # FIX: out-of-range indices previously raised UnboundLocalError.
        raise IndexError("caption index {} out of range".format(index))
class Multi30k(data.Dataset):
    """Multi30k (Flickr30k + translated captions) dataset.

    Parameters
    ----------
    sset : split selector ("train", "val", anything else -> 2016 flickr test)
    image_dir : directory containing the Flickr30k images
    split_dir : directory with the per-split image-list files
    tok_dir : directory with the tokenised caption files
    lang : string of language codes; a language is active when its two-letter
        code is a substring (e.g. "enfr")
    transform : torchvision transform applied to images
    embed_type : "multi", "align" or "bivec" embedding family
    typ : what __getitem__ returns: 'image', 'caption', or both ('all')
    dic : optional explicit .vec path; when given, only English is loaded
    """

    # Hard-coded embedding locations; "{}" is the language code.
    _SHARED_SPACE_PATHS = {
        "multi": "/data/m.portaz/wiki.multi.{}.vec",
        "align": "/data/m.portaz/wiki.{}.align.vec",
    }
    _BIVEC_PATHS = {
        "en": "/data/m.portaz/bivec_model_vec.en-fr.en.vec",
        "fr": "/data/m.portaz/bivec_model_vec.en-fr.fr.vec",
        "de": "/data/m.portaz/bivec_model_vec.de-en.de.vec",
    }

    def _load_lang_embedding(self, lang_code, embed_type):
        """Load the embedding for one language.

        Returns (embedding_matrix, word2id), or None when the embedding
        type is unknown or unsupported for that language.
        """
        if embed_type in self._SHARED_SPACE_PATHS:
            emb_path = self._SHARED_SPACE_PATHS[embed_type].format(lang_code)
        elif embed_type == "bivec":
            if lang_code not in self._BIVEC_PATHS:
                print("Bivec not supported for czech")
                return None
            emb_path = self._BIVEC_PATHS[lang_code]
        else:
            return None
        emb, _, word_ids = _load_vec(emb_path)
        return emb, word_ids

    def __init__(self, sset="train", image_dir="/data/datasets/flickr30k_images", split_dir="data/image_splits", tok_dir="data/tok", lang='en', transform=None, embed_type="multi", typ='all', dic=None):
        self.transform = transform
        self.imList = []
        self.rootDir = image_dir
        self.typ = typ
        if dic is None:
            if "en" in lang:
                # Informational prints kept identical to the original.
                if embed_type == "multi":
                    print("Using multi embeddings")
                elif embed_type == "align":
                    print("Using aligned embeddings")
                elif embed_type == 'bivec':
                    print("Using bivec embeddings")
                else:
                    print("Unknown embedding type :", embed_type)
                loaded = self._load_lang_embedding("en", embed_type)
                if loaded is not None:
                    self.engEmb, self.engWordsID = loaded
            if "fr" in lang:
                loaded = self._load_lang_embedding("fr", embed_type)
                if loaded is not None:
                    self.frEmb, self.frWordsID = loaded
            if "de" in lang:
                loaded = self._load_lang_embedding("de", embed_type)
                if loaded is not None:
                    self.deEmb, self.deWordsID = loaded
            if "cs" in lang:
                loaded = self._load_lang_embedding("cs", embed_type)
                if loaded is not None:
                    self.csEmb, self.csWordsID = loaded
        else:
            if 'en' in lang:
                self.engEmb, _, self.engWordsID = _load_vec(dic)
        self.captions = []
        # File-name stem shared by the image-split file and the caption files.
        if "train" in sset:
            split_name = "train"
        elif "val" in sset:
            split_name = "val"
        else:
            split_name = "test_2016_flickr"
        imFile = os.path.join(split_dir, split_name + ".txt")
        # Captions are appended language by language in a fixed order, so the
        # index ranges per language stay contiguous (same order as before).
        for lang_code in ("fr", "en", "de", "cs"):
            if lang_code in lang:
                tok_path = os.path.join(tok_dir, "{}.lc.norm.tok.{}".format(split_name, lang_code))
                for i, line in enumerate(open(tok_path)):
                    self.captions.append((line.rstrip(), lang_code, i))
        for line in open(imFile):
            self.imList.append(os.path.join(image_dir, line.rstrip()))

    def __len__(self):
        return len(self.captions)

    def __getitem__(self, index):
        caption, lang, imId = self.captions[index]
        if lang == 'fr':
            cap = encode_sentence(caption, self.frEmb, self.frWordsID, tokenize=False)
        elif lang == 'en':
            cap = encode_sentence(caption, self.engEmb, self.engWordsID, tokenize=False)
        elif lang == 'de':
            cap = encode_sentence(caption, self.deEmb, self.deWordsID, tokenize=False)
        elif lang == 'cs':
            cap = encode_sentence(caption, self.csEmb, self.csWordsID, tokenize=False)
        else:
            print("Unknown language : ", lang)
            return None
        if self.typ == 'image':
            im = self.transform(Image.open(self.imList[imId]))
            return im
        elif self.typ == 'caption':
            return cap
        else:
            im = self.transform(Image.open(self.imList[imId]))
            return im, cap

    def getImageAndCaption(self, index):
        """Return the raw caption string and the untransformed PIL image."""
        caption, lang, imId = self.captions[index]
        im = Image.open(self.imList[imId])
        return caption, im
class FileDataset(data.Dataset):
    """Dataset over the ``.jpg`` files of a directory.

    If ``imgs`` is given (non-empty), it is used as the image list;
    otherwise the directory is scanned for ``*.jpg`` files.
    """

    def __init__(self, img_dir_paths, imgs=None, transform=None):
        self.transform = transform
        self.root = img_dir_paths
        if imgs:
            self.imgs = imgs
        else:
            self.imgs = [os.path.join(img_dir_paths, f)
                         for f in os.listdir(img_dir_paths)
                         if re.match(r'.*\.jpg', f)]

    def __getitem__(self, index):
        img = Image.open(self.imgs[index]).convert('RGB')
        return img if self.transform is None else self.transform(img)

    def get_image_list(self):
        return self.imgs

    def __len__(self):
        return len(self.imgs)
class DoubleDataset(data.Dataset):
    # Pairs two datasets together; only the constructor is visible in this
    # chunk — item access presumably lives further down in the original file.
    def __init__(self, d1, d2):
        self.d1 = d1
        self.d2 = d2
| |
from numba import config, ir, ir_utils, utils, prange
import types
from numba.ir_utils import (
mk_unique_var,
next_label,
add_offset_to_labels,
replace_vars,
remove_dels,
remove_dead,
rename_labels,
find_topo_order,
merge_adjacent_blocks,
GuardException,
require,
guard,
get_definition,
find_callname
)
from numba.analysis import compute_cfg_from_blocks
from numba.targets.rangeobj import range_iter_len
from numba.unsafe.ndarray import empty_inferred as unsafe_empty_inferred
import numba.types as nbtypes
import numpy as np
"""
Variable enable_inline_arraycall is only used for testing purpose.
"""
enable_inline_arraycall = True
class InlineClosureCallPass(object):
    """InlineClosureCallPass class looks for direct calls to locally defined
    closures, and inlines the body of the closure function to the call site.
    """
    def __init__(self, func_ir, flags, run_frontend):
        # func_ir: the Numba IR of the function being transformed.
        # flags: compiler flags; only auto_parallel is consulted here (for
        # the array-call inlining step).
        # run_frontend: callable that compiles a Python function to Numba IR.
        self.func_ir = func_ir
        self.flags = flags
        self.run_frontend = run_frontend
    def run(self):
        """Run inline closure call pass.
        """
        modified = False
        work_list = list(self.func_ir.blocks.items())
        debug_print = _make_debug_print("InlineClosureCallPass")
        debug_print("START")
        # Worklist algorithm: inlining a call splits the current block, and
        # the freshly created blocks are pushed back for re-scanning.
        while work_list:
            label, block = work_list.pop()
            for i in range(len(block.body)):
                instr = block.body[i]
                if isinstance(instr, ir.Assign):
                    lhs = instr.target
                    expr = instr.value
                    if isinstance(expr, ir.Expr) and expr.op == 'call':
                        func_def = guard(get_definition, self.func_ir, expr.func)
                        debug_print("found call to ", expr.func, " def = ", func_def)
                        # Only calls whose callee is a locally created closure
                        # (a 'make_function' expression) are inlined.
                        if isinstance(func_def, ir.Expr) and func_def.op == "make_function":
                            new_blocks = self.inline_closure_call(block, i, func_def)
                            for block in new_blocks:
                                work_list.append(block)
                            modified = True
                            # current block is modified, skip the rest
                            break
        if enable_inline_arraycall:
            # Identify loop structure
            if modified:
                # Need to do some cleanups if closure inlining kicked in
                merge_adjacent_blocks(self.func_ir)
            cfg = compute_cfg_from_blocks(self.func_ir.blocks)
            debug_print("start inline arraycall")
            _debug_dump(cfg)
            loops = cfg.loops()
            sized_loops = [(k, len(loops[k].body)) for k in loops.keys()]
            visited = []
            # We go over all loops, bigger loops first (outer first)
            for k, s in sorted(sized_loops, key=lambda tup: tup[1], reverse=True):
                visited.append(k)
                if guard(_inline_arraycall, self.func_ir, cfg, visited, loops[k],
                         self.flags.auto_parallel):
                    modified = True
            if modified:
                _fix_nested_array(self.func_ir)
        if modified:
            remove_dels(self.func_ir.blocks)
            # repeat dead code elimination until nothing can be further
            # removed
            while (remove_dead(self.func_ir.blocks, self.func_ir.arg_names)):
                pass
            self.func_ir.blocks = rename_labels(self.func_ir.blocks)
        debug_print("END")
    def inline_closure_call(self, block, i, callee):
        """Inline the body of `callee` at its callsite (`i`-th instruction of `block`)

        Returns the list of (label, block) pairs created by the inlining so
        the caller can re-scan them.
        """
        scope = block.scope
        instr = block.body[i]
        call_expr = instr.value
        debug_print = _make_debug_print("inline_closure_call")
        debug_print("Found closure call: ", instr, " with callee = ", callee)
        func_ir = self.func_ir
        # first, get the IR of the callee
        callee_ir = self.get_ir_of_code(callee.code)
        callee_blocks = callee_ir.blocks
        # 1. relabel callee_ir by adding an offset
        max_label = max(func_ir.blocks.keys())
        callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
        callee_ir.blocks = callee_blocks
        min_label = min(callee_blocks.keys())
        max_label = max(callee_blocks.keys())
        # reset globals in ir_utils before we use it
        ir_utils._max_label = max_label
        debug_print("After relabel")
        _debug_dump(callee_ir)
        # 2. rename all local variables in callee_ir with new locals created in func_ir
        callee_scopes = _get_all_scopes(callee_blocks)
        debug_print("callee_scopes = ", callee_scopes)
        # one function should only have one local scope
        assert(len(callee_scopes) == 1)
        callee_scope = callee_scopes[0]
        var_dict = {}
        for var in callee_scope.localvars._con.values():
            # Free variables are substituted in step 4, not renamed here.
            if not (var.name in callee.code.co_freevars):
                new_var = scope.define(mk_unique_var(var.name), loc=var.loc)
                var_dict[var.name] = new_var
        debug_print("var_dict = ", var_dict)
        replace_vars(callee_blocks, var_dict)
        debug_print("After local var rename")
        _debug_dump(callee_ir)
        # 3. replace formal parameters with actual arguments
        args = list(call_expr.args)
        if callee.defaults:
            debug_print("defaults = ", callee.defaults)
            if isinstance(callee.defaults, tuple): # Python 3.5
                args = args + list(callee.defaults)
            elif isinstance(callee.defaults, ir.Var) or isinstance(callee.defaults, str):
                defaults = func_ir.get_definition(callee.defaults)
                assert(isinstance(defaults, ir.Const))
                loc = defaults.loc
                args = args + [ir.Const(value=v, loc=loc)
                               for v in defaults.value]
            else:
                raise NotImplementedError(
                    "Unsupported defaults to make_function: {}".format(defaults))
        _replace_args_with(callee_blocks, args)
        debug_print("After arguments rename: ")
        _debug_dump(callee_ir)
        # 4. replace freevar with actual closure var
        if callee.closure:
            closure = func_ir.get_definition(callee.closure)
            assert(isinstance(closure, ir.Expr)
                   and closure.op == 'build_tuple')
            assert(len(callee.code.co_freevars) == len(closure.items))
            debug_print("callee's closure = ", closure)
            _replace_freevars(callee_blocks, closure.items)
            debug_print("After closure rename")
            _debug_dump(callee_ir)
        # 5. split caller blocks into two
        new_blocks = []
        new_block = ir.Block(scope, block.loc)
        new_block.body = block.body[i + 1:]
        new_label = next_label()
        func_ir.blocks[new_label] = new_block
        new_blocks.append((new_label, new_block))
        block.body = block.body[:i]
        block.body.append(ir.Jump(min_label, instr.loc))
        # 6. replace Return with assignment to LHS
        topo_order = find_topo_order(callee_blocks)
        _replace_returns(callee_blocks, instr.target, new_label)
        # remove the old definition of instr.target too
        if (instr.target.name in func_ir._definitions):
            func_ir._definitions[instr.target.name] = []
        # 7. insert all new blocks, and add back definitions
        for label in topo_order:
            # block scope must point to parent's
            block = callee_blocks[label]
            block.scope = scope
            _add_definitions(func_ir, block)
            func_ir.blocks[label] = block
            new_blocks.append((label, block))
        debug_print("After merge in")
        _debug_dump(func_ir)
        return new_blocks
    def get_ir_of_code(self, fcode):
        """
        Compile a code object to get its IR.

        A wrapper function ``g`` is synthesised whose inner ``f`` has the
        same free variables as *fcode*; *fcode* is then swapped in as
        ``f.__code__`` before running the frontend.
        """
        glbls = self.func_ir.func_id.func.__globals__
        nfree = len(fcode.co_freevars)
        func_env = "\n".join(["  c_%d = None" % i for i in range(nfree)])
        func_clo = ",".join(["c_%d" % i for i in range(nfree)])
        func_arg = ",".join(["x_%d" % i for i in range(fcode.co_argcount)])
        func_text = "def g():\n%s\n  def f(%s):\n    return (%s)\n  return f" % (
            func_env, func_arg, func_clo)
        loc = {}
        exec(func_text, glbls, loc)
        # hack parameter name .0 for Python 3 versions < 3.6
        if utils.PYVERSION >= (3,) and utils.PYVERSION < (3, 6):
            co_varnames = list(fcode.co_varnames)
            if co_varnames[0] == ".0":
                co_varnames[0] = "implicit0"
            fcode = types.CodeType(
                fcode.co_argcount,
                fcode.co_kwonlyargcount,
                fcode.co_nlocals,
                fcode.co_stacksize,
                fcode.co_flags,
                fcode.co_code,
                fcode.co_consts,
                fcode.co_names,
                tuple(co_varnames),
                fcode.co_filename,
                fcode.co_name,
                fcode.co_firstlineno,
                fcode.co_lnotab,
                fcode.co_freevars,
                fcode.co_cellvars)
        f = loc['g']()
        f.__code__ = fcode
        f.__name__ = fcode.co_name
        # NOTE: this local deliberately shadows the module-level `ir` import.
        ir = self.run_frontend(f)
        return ir
def _make_debug_print(prefix):
def debug_print(*args):
if config.DEBUG_INLINE_CLOSURE:
print(prefix + ": " + "".join(str(x) for x in args))
return debug_print
def _debug_dump(func_ir):
    """Dump *func_ir* when closure-inlining debug output is enabled."""
    if not config.DEBUG_INLINE_CLOSURE:
        return
    func_ir.dump()
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
for label, block in blocks.items():
if not (block.scope in all_scopes):
all_scopes.append(block.scope)
return all_scopes
def _replace_args_with(blocks, args):
    """
    Replace ir.Arg(...) placeholders with the actual argument values taken
    from the call site.
    """
    for block in blocks.values():
        for assign in block.find_insts(ir.Assign):
            if isinstance(assign.value, ir.Arg):
                arg_index = assign.value.index
                assert(arg_index < len(args))
                assign.value = args[arg_index]
def _replace_freevars(blocks, args):
    """
    Replace ir.FreeVar(...) placeholders with the real closure variables of
    the parent function.
    """
    for block in blocks.values():
        for assign in block.find_insts(ir.Assign):
            if isinstance(assign.value, ir.FreeVar):
                freevar_index = assign.value.index
                assert(freevar_index < len(args))
                assign.value = args[freevar_index]
def _replace_returns(blocks, target, return_label):
    """
    Replace each `ir.Return` by an assignment of the returned value directly
    to *target*, followed by a jump to *return_label*. 'cast' assignments
    that feed the returned value are unwrapped to the uncast value.
    """
    for label, block in blocks.items():
        casts = []
        for i in range(len(block.body)):
            stmt = block.body[i]
            if isinstance(stmt, ir.Return):
                # A Return must be the terminator of its block.
                assert(i + 1 == len(block.body))
                block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
                block.body.append(ir.Jump(return_label, stmt.loc))
                # remove cast of the returned value
                for cast in casts:
                    if cast.target.name == stmt.value.name:
                        cast.value = cast.value.value
            elif isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr) and stmt.value.op == 'cast':
                # Remember cast assignments seen earlier in this block; one
                # of them may define the value the Return consumes.
                casts.append(stmt)
def _add_definitions(func_ir, block):
    """
    Record the variable definitions found in *block* into the definition
    map of the parent *func_ir*.
    """
    definition_map = func_ir._definitions
    for assign in block.find_insts(ir.Assign):
        definition_map[assign.target.name].append(assign.value)
def _find_arraycall(func_ir, block):
    """Look for statement like "x = numpy.array(y)" or "x[..] = y"
    immediately after the closure call that creates list y (the i-th
    statement in block). Return the statement index if found, or
    raise GuardException.

    Returns (list_var, array_stmt_index, array_kws) on success; any failed
    require() below raises GuardException for the caller's guard().
    """
    array_var = None
    # NOTE(review): array_call_index is never used; array_stmt_index below
    # is the index actually returned.
    array_call_index = None
    list_var_dead_after_array_call = False
    list_var = None
    i = 0
    while i < len(block.body):
        instr = block.body[i]
        if isinstance(instr, ir.Del):
            # Stop the process if list_var becomes dead
            if list_var and array_var and instr.value == list_var.name:
                list_var_dead_after_array_call = True
                break
            pass
        elif isinstance(instr, ir.Assign):
            # Found array_var = array(list_var)
            lhs = instr.target
            expr = instr.value
            if (guard(find_callname, func_ir, expr) == ('array', 'numpy') and
                isinstance(expr.args[0], ir.Var)):
                list_var = expr.args[0]
                array_var = lhs
                array_stmt_index = i
                array_kws = dict(expr.kws)
        elif (isinstance(instr, ir.SetItem) and
              isinstance(instr.value, ir.Var) and
              not list_var):
            list_var = instr.value
            # Found array_var[..] = list_var, the case for nested array
            array_var = instr.target
            array_def = get_definition(func_ir, array_var)
            require(guard(_find_unsafe_empty_inferred, func_ir, array_def))
            array_stmt_index = i
            array_kws = {}
        else:
            # Bail out otherwise
            break
        i = i + 1
    # require array_var is found, and list_var is dead after array_call.
    require(array_var and list_var_dead_after_array_call)
    _make_debug_print("find_array_call")(block.body[array_stmt_index])
    return list_var, array_stmt_index, array_kws
def _find_iter_range(func_ir, range_iter_var):
"""Find the iterator's actual range if it is either range(n), or range(m, n),
otherwise return raise GuardException.
"""
debug_print = _make_debug_print("find_iter_range")
range_iter_def = get_definition(func_ir, range_iter_var)
debug_print("range_iter_var = ", range_iter_var, " def = ", range_iter_def)
require(isinstance(range_iter_def, ir.Expr) and range_iter_def.op == 'getiter')
range_var = range_iter_def.value
range_def = get_definition(func_ir, range_var)
debug_print("range_var = ", range_var, " range_def = ", range_def)
require(isinstance(range_def, ir.Expr) and range_def.op == 'call')
func_var = range_def.func
func_def = get_definition(func_ir, func_var)
debug_print("func_var = ", func_var, " func_def = ", func_def)
require(isinstance(func_def, ir.Global) and func_def.value == range)
nargs = len(range_def.args)
if nargs == 1:
stop = get_definition(func_ir, range_def.args[0], | |
<filename>tests/test_merge.py
# Test unit for decomon with Dense layers
from __future__ import absolute_import
import pytest
import numpy as np
from decomon.layers.decomon_layers import to_monotonic
from decomon.layers.decomon_merge_layers import (
DecomonConcatenate,
DecomonAverage,
DecomonMaximum,
DecomonMinimum,
DecomonAdd,
DecomonSubtract,
DecomonMultiply,
DecomonDot,
)
from tensorflow.keras.layers import Concatenate, Average, Maximum, Minimum, Add, Subtract, Input, Multiply, Dot
from . import (
get_tensor_decomposition_1d_box,
get_standart_values_1d_box,
assert_output_properties_box,
assert_output_properties_box_linear,
get_standard_values_multid_box,
get_tensor_decomposition_multid_box,
)
import tensorflow.python.keras.backend as K
from numpy.testing import assert_almost_equal
from tensorflow.keras.models import Model
@pytest.mark.parametrize(
    "n0, mode, floatx",
    # Same 90 cases as before, generated instead of hand-written:
    # floatx varies slowest (32, 64, 16), then mode, then n0 — this
    # reproduces the original ordering exactly.
    [
        (n0, mode, floatx)
        for floatx in (32, 64, 16)
        for mode in ("hybrid", "forward", "ibp")
        for n0 in range(10)
    ],
)
def test_DecomonAdd_1D_box(n0, mode, floatx):
    """Check DecomonAdd bound propagation on the 1D box benchmarks."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 cannot reach 5 decimals; relax epsilon and precision.
        K.set_epsilon(1e-2)
        decimal = 2
    decomon_op = DecomonAdd(dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_1 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_0_ = get_standart_values_1d_box(n0, dc_decomp=False)
    inputs_1_ = get_standart_values_1d_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output_decomon = decomon_op(inputs_0[2:] + inputs_1[2:])
    if mode == "forward":
        output_decomon = decomon_op([z0, W_u0, b_u0, W_l0, b_l0] + [z1, W_u1, b_u1, W_l1, b_l1])
    if mode == "ibp":
        output_decomon = decomon_op([u_c0, l_c0] + [u_c1, l_c1])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    # Reference result of the Add layer on the concrete values.
    y_ = y0_ + y1_
    z_ = z0_
    u_, w_u_, b_u_, l_, w_l_, b_l_ = [None] * 6
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    if mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    # Restore global Keras state for the following tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "n0, mode, floatx",
    # Same 90 cases as before, generated instead of hand-written:
    # floatx varies slowest (32, 64, 16), then mode, then n0 — this
    # reproduces the original ordering exactly.
    [
        (n0, mode, floatx)
        for floatx in (32, 64, 16)
        for mode in ("hybrid", "forward", "ibp")
        for n0 in range(10)
    ],
)
def test_DecomonAverage_1D_box(n0, mode, floatx):
    """Check DecomonAverage bound propagation on the 1D box benchmarks."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 cannot reach 5 decimals; relax epsilon and precision.
        K.set_epsilon(1e-2)
        decimal = 2
    decomon_op = DecomonAverage(dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_1 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_0_ = get_standart_values_1d_box(n0, dc_decomp=False)
    inputs_1_ = get_standart_values_1d_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output_decomon = decomon_op(inputs_0[2:] + inputs_1[2:])
    if mode == "forward":
        output_decomon = decomon_op([z0, W_u0, b_u0, W_l0, b_l0] + [z1, W_u1, b_u1, W_l1, b_l1])
    if mode == "ibp":
        output_decomon = decomon_op([u_c0, l_c0] + [u_c1, l_c1])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    z_ = z0_
    # Reference result of the Average layer on the concrete values.
    y_ = (y0_ + y1_) / 2.0
    u_, w_u_, b_u_, l_, w_l_, b_l_ = [None] * 6
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    if mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    # Restore global Keras state for the following tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
"n0, mode, floatx",
[
(0, "hybrid", 32),
(1, "hybrid", 32),
(2, "hybrid", 32),
(3, "hybrid", 32),
(4, "hybrid", 32),
(5, "hybrid", 32),
(6, "hybrid", 32),
(7, "hybrid", 32),
(8, "hybrid", 32),
(9, "hybrid", 32),
(0, "forward", 32),
(1, "forward", 32),
(2, "forward", 32),
(3, "forward", 32),
(4, "forward", 32),
(5, "forward", 32),
(6, "forward", 32),
(7, "forward", 32),
(8, "forward", 32),
(9, "forward", 32),
(0, "ibp", 32),
(1, "ibp", 32),
(2, "ibp", 32),
(3, "ibp", 32),
(4, "ibp", 32),
(5, "ibp", 32),
(6, "ibp", 32),
(7, "ibp", 32),
(8, "ibp", 32),
(9, "ibp", 32),
(0, "hybrid", 64),
(1, "hybrid", 64),
(2, "hybrid", 64),
(3, "hybrid", 64),
(4, "hybrid", 64),
(5, "hybrid", 64),
(6, "hybrid", 64),
(7, "hybrid", 64),
(8, "hybrid", 64),
(9, "hybrid", 64),
(0, "forward", 64),
(1, "forward", 64),
(2, "forward", 64),
(3, "forward", 64),
(4, "forward", 64),
(5, "forward", 64),
(6, "forward", 64),
(7, "forward", 64),
(8, "forward", 64),
(9, "forward", 64),
(0, "ibp", 64),
(1, "ibp", 64),
(2, "ibp", 64),
(3, "ibp", 64),
(4, "ibp", 64),
(5, "ibp", 64),
(6, "ibp", 64),
(7, "ibp", 64),
(8, "ibp", 64),
(9, "ibp", 64),
(0, "hybrid", 16),
(1, "hybrid", 16),
(2, "hybrid", 16),
(3, "hybrid", 16),
(4, "hybrid", 16),
(5, "hybrid", 16),
(6, "hybrid", 16),
(7, "hybrid", 16),
(8, "hybrid", 16),
(9, "hybrid", 16),
(0, "forward", 16),
(1, "forward", 16),
(2, "forward", 16),
(3, "forward", 16),
(4, "forward", 16),
(5, "forward", 16),
(6, "forward", 16),
(7, "forward", 16),
(8, "forward", 16),
(9, "forward", 16),
(0, "ibp", 16),
(1, "ibp", 16),
(2, "ibp", 16),
(3, "ibp", 16),
(4, "ibp", 16),
(5, "ibp", 16),
(6, "ibp", | |
(pickle file) created with the function cover_dom_mp.
Parameters
----------
filename : name of the pickle file containing the data
Returns
----------
den, col_den, median_mp, avg_mp, centers
'''
with open(filename, "rb") as all_data:
data = pickle.load(all_data)
den = data['density']
col_den = data['column_density']
median_mp = data['median']
avg_mp = data['average']
centers = data['centers']
return den, col_den, median_mp, avg_mp, centers
def create_plot(filename, p=False):
    '''
    Creates a plot of the log10 of the median column density within the sampled spheres as a
    function of the log10 of density of those spheres.
    Points are coloured by the 84th-16th percentile width of each sphere's
    column-density distribution.
    Parameters
    ----------
    filename : the name of the data file; nside (2nd number), radius in pc
               (4th number) and snapshot number (5th number) are parsed
               from it, e.g. 'cover_nside8_2pc_R10_300.pkl'
    p : if True, also saves the figure as a PDF
    Returns
    ----------
    Plot of log10 of median column density within the sampled spheres as a
    function of the log10 of density of those spheres.
    '''
    # Run parameters are encoded in the file name.
    pattern = r'\d*\.\d+|\d+'
    nums = re.findall(pattern, filename)
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    col_den = data['column_density']
    median_mp = data['median']
    pc = 3.085e18  # parsec in cm
    rad = int(nums[3])
    nside = int(nums[1])
    # Sphere surface area and the area of one HEALPix pixel on it (cm^2).
    A = 4 * np.pi * (rad*pc)**2
    A_cm = A/hp.nside2npix(nside)
    # Difference between higher and lower percentile, one entry per sphere.
    width = [np.percentile(np.log10(cd/A_cm), 84) - np.percentile(np.log10(cd/A_cm), 16)
             for cd in col_den]
    plt.figure(figsize=(10,7.5))
    plt.scatter(np.log10(den), np.log10(median_mp/A_cm), c=width)
    plt.colorbar()
    plt.title('Time = {} Myrs'.format(int(nums[4])/100))
    plt.xlabel(r"$\rho_S$ [log10($g/cm^3$)]")
    plt.ylabel(r"$\Sigma_S$ log10($g/cm^2$)")
    plt.xlim(-28.5,-21)
    plt.ylim(-9,-3)
    if p:
        plt.savefig("nside{}_{}pc_R{}_{}.pdf".format(nums[1],nums[2], nums[3],nums[4]))
    plt.show()
def width_plot(filename, high = 84, low = 16, p = False):
    '''
    Creates a plot of the difference between selected percentiles (by default 84 and 16)
    as a function of sphere density.
    Parameters
    ----------
    filename : name of the pickle file produced by cover_dom_mp
    high : upper percentile (default 84)
    low : lower percentile (default 16)
    p : if True, also saves the figure as a PDF
    Returns
    ----------
    Scatter plot
    '''
    plt.figure(figsize=(10,7.5))
    # Run parameters (nside, resolution, radius, snapshot) sit in the file name.
    pattern = r'\d*\.\d+|\d+'
    nums = re.findall(pattern, filename)
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    col_den = data['column_density']
    # Difference between higher and lower percentile, one entry per sphere.
    width = [np.percentile(np.log10(cd), high) - np.percentile(np.log10(cd), low)
             for cd in col_den]
    plt.scatter(np.log10(np.asarray(den)), np.asarray(width))
    plt.ylabel(r'$\Sigma_{}^{}$ [log10($g/cm^2$)]'
               .format('{'+str(low)+'}','{'+str(high)+'}',high, low), wrap = True)
    plt.xlabel(r'$\rho_S$ [log10($g/cm^3$)]')
    plt.title('Time = {} Myrs'
              .format(int(nums[4])/100))
    plt.xlim(-28.5,-21)
    plt.ylim(top=4.8)
    if p:
        plt.savefig("width_nside{}_{}pc_R{}_{}.pdf".format(nums[1],nums[2], nums[3],nums[4]))
    plt.show()
def col_den_hist(filename, i, plot=False):
    '''
    Returns column density for a particular sphere in the domain.
    Optionally, creates histograms for that sphere.
    Parameters
    ----------
    filename : name of the pickle file produced by cover_dom_mp
    i : index of the sphere whose columns are wanted
    plot : if True, also draw a histogram of the log10 column densities
    Returns
    ----------
    The column densities (one per line of sight) of sphere i.
    '''
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    col_den = data['column_density']
    if plot:
        plt.hist(np.log10(col_den[i]))
        plt.xlabel("Column density (log10)")
    return col_den[i]
def covering_fraction(filename, threshold):
    '''
    Returns covering fraction as a function of density in a sphere. Covering fraction is defined as
    how many lines of sight within a sphere have column density above a certain threshold divided by the
    total lines of sight (i.e., pixels covering the sphere).
    Parameters:
    -----------
    filename : name of the pickle file produced by cover_dom_mp; nside
               (2nd number) and radius in pc (4th number) are parsed
               from the file name
    threshold : column-density threshold in particles per cm^2
    Returns:
    ----------
    den : density of each sphere
    cov : covering fraction of each sphere
    z : z coordinate of each sphere centre
    '''
    # Run parameters are encoded in the file name; parse once.
    pattern = r'\d*\.\d+|\d+'
    nums = re.findall(pattern, filename)
    rad = int(nums[3])
    nside = int(nums[1])
    pc = 3.085e18  # parsec in cm
    A = 4 * np.pi * (rad*pc)**2
    #area of a pixel in cm^2
    A_cm = A/hp.nside2npix(nside)
    n_H = 6e+23 #number of hydrogen atoms in one gram of gas
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    col_den = data['column_density']
    centers = data['centers']
    # particles per cm^2 for every line of sight of every sphere
    b = col_den*n_H/A_cm
    # Fraction of pixels per sphere whose column exceeds the threshold.
    cov = [len(np.where(row > threshold)[0])/hp.nside2npix(nside)
           for row in b]
    z = centers[:,2]
    return den, cov, z
def cov_plots(name):
    '''
    Covering fraction plots for various thresholds.
    Draws a 2x3 grid of scatter plots; the threshold starts at 1e+17
    particles cm^-2 and grows by a factor of 10 per panel.
    NOTE(review): an earlier docstring claimed 1e+15 to 1e+20, which does
    not match the code below -- confirm which range was intended.
    '''
    with open(name, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    den_part = den*6e+23  # grams -> hydrogen atoms per cm^3 (6e23 atoms/g)
    th = 1e+17
    fig, ax = plt.subplots(2,3, figsize=(20,15), sharex=True, sharey=True)
    for i in range(2):
        for j in range(3):
            # Recompute covering fraction for the current threshold.
            den, cov, z = covering_fraction(name, th)
            ax[i][j].scatter(np.log10(den_part), cov)
            ax[i][j].set_title("{:.0e} particles per $cm^2$"
                               .format(th), wrap=True)
            ax[1][j].set_xlabel("Density [log10($particles/cm^3$)]")
            # Raw string avoids the invalid '\k' escape sequence.
            ax[i][0].set_ylabel(r"$\kappa$")
            th *= 10
def avg_width(filename, high = 75, low = 25):
    '''
    Mean inter-percentile width of the log10 column-density distributions.
    Parameters
    ----------
    filename : name of the pickle file produced by cover_dom_mp
    high : upper percentile (default 75)
    low : lower percentile (default 25)
    Returns
    ----------
    The width (high minus low percentile of log10 column density),
    averaged over all spheres in the file.
    '''
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    col_den = data['column_density']
    # difference between higher and lower percentile, one entry per sphere
    width = [np.percentile(np.log10(cd), high) - np.percentile(np.log10(cd), low)
             for cd in col_den]
    return np.average(width)
def width_den(filename, high = 75, low = 25):
    '''
    Inter-percentile width of the log10 density distribution.
    Parameters
    ----------
    filename : name of the pickle file produced by cover_dom_mp
    high : upper percentile (default 75)
    low : lower percentile (default 25)
    Returns
    ----------
    One-element list [high percentile - low percentile] of log10 density;
    kept as a list for backward compatibility with existing callers.
    '''
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    log_den = np.log10(den)
    return [np.percentile(log_den, high) - np.percentile(log_den, low)]
def sigmoid(x, a, b):
    """Logistic curve with slope parameter a and midpoint b."""
    return 1.0 / (1.0 + np.exp(a * (b - x)))
'''def avg_cd(filename):
#Average column density in a snapshot.
with open(filename, "rb") as all_data:
data = pickle.load(all_data)
den = data['density']
col_den = data['column_density']
median_mp = data['median']
avg_mp = data['average']
centers = data['centers']
avg_cd = []
for i in range(len(col_den)):
avg_cd.append(np.average(np.log10(col_den[i])))
avg_cd = np.average(avg_cd)
return avg_cd'''
def avg_cd(filename):
    '''
    Average column density in a snapshot.

    Parses nside and sphere radius from the file name, converts every
    line-of-sight mass column to a surface density (g per pixel area in
    cm^2) and averages log10 of the result over all spheres.
    Zero columns are replaced by the sphere's average column first so the
    log10 stays finite.
    NOTE(review): np.average over the list of per-sphere arrays assumes
    every sphere has the same number of pixels -- confirm.
    '''
    # Run parameters (nside: 2nd number, radius: 4th number) are in the name.
    pattern = r'\d*\.\d+|\d+'
    nums = re.findall(pattern, filename)
    pc = 3.085e18
    rad = int(nums[3])
    nside = int(nums[1])
    A = 4 * np.pi * (rad*pc)**2
    #area of a pixel in cm^2
    A_cm = A/hp.nside2npix(nside)
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    col_den = data['column_density']
    median_mp = data['median']
    avg_mp = data['average']
    centers = data['centers']
    avg_cd = []
    for i in range(len(col_den)):
        #cd = col_den[i][col_den[i] != 0.0]
        cd = np.where(col_den[i] == 0,np.average(col_den[i]), col_den[i]) #excludes columns with 0 column density and
        avg_cd.append(np.log10(cd/A_cm)) #replaces them with the average value
    avg = np.average(avg_cd)
    return avg
def avg_den(filename):
    '''
    Average density in a snapshot.
    Parameters
    ----------
    filename : name of the pickle file produced by cover_dom_mp
    Returns
    ----------
    Mean of log10 of the sphere densities.
    '''
    with open(filename, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    # The previous code appended the full log10(den) array once per sphere
    # and averaged the copies; the mean of N identical copies equals the
    # mean of one, so compute it directly (same value, no N^2 memory).
    return np.mean(np.log10(den))
def cov_plot_fit(name, th=1e18):
    '''
    Creates a plot of covering fraction with sigmoid function fit.

    Scatter of covering fraction vs log10 particle density, binned means
    with std-dev error bars, and a fitted sigmoid overlaid.
    th : column-density threshold in particles per cm^2.
    '''
    with open(name, "rb") as all_data:
        data = pickle.load(all_data)
    den = data['density']
    col_den = data['column_density']
    median_mp = data['median']
    avg_mp = data['average']
    centers = data['centers']
    # grams -> hydrogen atoms per cm^3 (6e23 atoms per gram)
    den_part = den*6e+23
    # Snapshot/resolution/nside are encoded in the file name.
    pattern = r'\d*\.\d+|\d+'
    nside = int(re.findall(pattern, name)[1])
    res = float(re.findall(pattern, name)[2])
    snap = int(re.findall(pattern, name)[-1])
    #th = 1e18
    plt.figure(figsize=(10,7.5))
    den, cov, z = covering_fraction(name, th)
    plt.scatter(np.log10(den_part), cov, alpha=0.2, label="Data")
    plt.title("[Snapshot {}, resolution {} pc] Covering fraction as a function of density \n with threshold {:.0e} particles per cm^2"
              .format(snap, res, th), wrap=True)
    #ax[1][1].set_xlabel("Density (log10 $particles/cm^3$)")
    #ax[0][0].set_ylabel("Covering fraction")
    # Bin the scatter to get per-bin mean and scatter for error bars.
    bin_means, bin_edges, binnumber = stats.binned_statistic(np.log10(den_part),
                                                             cov, statistic=np.mean, bins=10)
    bin_std, bin_edges1, binnumber1 = stats.binned_statistic(np.log10(den_part),
                                                             cov, statistic=np.std, bins=10)
    bin_mid = []
    for j in range(len(bin_edges)-1):
        bin_mid.append((bin_edges[j]+bin_edges[j+1])/2)
    #turn scatter plot into line plot (when data is not in any particular order)
    #see #https://stackoverflow.com/questions/37414916/pythons-matplotlib-plotting-in-wrong-order
    plt.scatter(bin_mid, bin_means, s=50, label="Grouped points")
    # Fit the sigmoid, then sort (x, y) pairs so the line draws cleanly.
    popt, pcov1 = curve_fit(sigmoid, np.log10(den_part), cov)
    x = np.log10(den_part)
    y = sigmoid(np.log10(den_part), *popt)
    lists = sorted(zip(*[x, y]))
    new_x, new_y = list(zip(*lists))
    plt.errorbar(bin_mid, bin_means, yerr=bin_std, c='orange', fmt=" ")
    #plt.scatter(np.log10(den_part), sigmoid(np.log10(den_part), *popt), label="Fit to data")
    plt.plot(new_x,new_y,c='green',lw=4,label="Fit to data")
    print(popt)
    #plt.scatter(np.log10(den_part), sigmoid(np.log10(den_part), *popt1))
| |
"""
Access to the Soliscloud API for PV monitoring.
Works for all Ginlong brands using the Soliscloud API
For more information: https://github.com/hultenvp/solis-sensor/
"""
from __future__ import annotations
import hashlib
#from hashlib import sha1
import hmac
import base64
import asyncio
from datetime import datetime
from datetime import timezone
from http import HTTPStatus
import json
import logging
from typing import Any
from aiohttp import ClientError, ClientSession
import async_timeout
from .ginlong_base import BaseAPI, GinlongData, PortalConfig
from .ginlong_const import *
from .soliscloud_const import *
_LOGGER = logging.getLogger(__name__)
# VERSION
VERSION = '0.1.4'
# Response constants
# Keys of the result dict produced by the HTTP helper methods below.
SUCCESS = 'Success'
CONTENT = 'Content'
STATUS_CODE = 'StatusCode'
MESSAGE = 'Message'
#VALUE_RECORD = '_from_record'
#VALUE_ELEMENT = ''
# All Soliscloud API requests are POSTs.
VERB = "POST"
# NOTE(review): 'inveterDetail' looks misspelled, but it appears to be the
# literal endpoint path this API expects -- do not "fix" the spelling.
INVERTER_DETAIL = '/v1/api/inveterDetail'
PLANT_DETAIL = '/v1/api/stationDetail'
InverterDataType = dict[str, dict[str, list]]
"""{endpoint: [payload type, {key type, decimal precision}]}"""
# Maps each sensor constant to [JSON key, python type, decimal precision].
# The precision entry is applied only to float values (see _get_value);
# None means no rounding.
INVERTER_DATA: InverterDataType = {
    INVERTER_DETAIL: {
        INVERTER_SERIAL: ['sn', str, None],
        INVERTER_PLANT_ID: ['stationId', str, None],
        INVERTER_DEVICE_ID: ['id', str, None],
        INVERTER_DATALOGGER_SERIAL: ['collectorId', str, None],
        # Timestamp of measurement
        INVERTER_TIMESTAMP_UPDATE: ['dataTimestamp', int, None],
        INVERTER_STATE: ['state', int, None],
        INVERTER_TEMPERATURE: ['inverterTemperature', float, 1],
        INVERTER_POWER_STATE: ['currentState', int, None],
        INVERTER_ACPOWER: ['pac', float, 3],
        INVERTER_ACPOWER_STR: ['pacStr', str, None],
        INVERTER_ACFREQUENCY: ['fac', float, 2],
        INVERTER_ENERGY_TODAY: ['eToday', float, 2],
        INVERTER_ENERGY_THIS_MONTH: ['eMonth', float, 2],
        INVERTER_ENERGY_THIS_YEAR: ['eYear', float, 2],
        INVERTER_ENERGY_THIS_YEAR_STR: ['eYearStr', str, None],
        INVERTER_ENERGY_TOTAL_LIFE: ['eTotal', float, 2],
        INVERTER_ENERGY_TOTAL_LIFE_STR: ['eTotalStr', str, None],
        STRING_COUNT: ['dcInputtype', int, None],
        STRING1_VOLTAGE: ['uPv1', float, 2],
        STRING2_VOLTAGE: ['uPv2', float, 2],
        STRING3_VOLTAGE: ['uPv3', float, 2],
        STRING4_VOLTAGE: ['uPv4', float, 2],
        STRING1_CURRENT: ['iPv1', float, 2],
        STRING2_CURRENT: ['iPv2', float, 2],
        STRING3_CURRENT: ['iPv3', float, 2],
        STRING4_CURRENT: ['iPv4', float, 2],
        STRING1_POWER: ['pow1', float, 2], # Undocumented
        STRING2_POWER: ['pow2', float, 2], # Undocumented
        STRING3_POWER: ['pow3', float, 2], # Undocumented
        STRING4_POWER: ['pow4', float, 2], # Undocumented
        PHASE1_VOLTAGE: ['uAc1', float, 2],
        PHASE2_VOLTAGE: ['uAc2', float, 2],
        PHASE3_VOLTAGE: ['uAc3', float, 2],
        PHASE1_CURRENT: ['iAc1', float, 2],
        PHASE2_CURRENT: ['iAc2', float, 2],
        PHASE3_CURRENT: ['iAc3', float, 2],
        BAT_POWER: ['batteryPower', float, 3],
        BAT_POWER_STR: ['batteryPowerStr', str, None],
        BAT_REMAINING_CAPACITY: ['batteryCapacitySoc', float, 2],
        BAT_TOTAL_ENERGY_CHARGED: ['batteryTotalChargeEnergy', float, 2],
        BAT_TOTAL_ENERGY_DISCHARGED: ['batteryTotalDischargeEnergy', float, 2],
        BAT_DAILY_ENERGY_CHARGED: ['batteryTodayChargeEnergy', float, 2],
        BAT_DAILY_ENERGY_DISCHARGED: ['batteryTodayDischargeEnergy', float, 2],
        GRID_DAILY_ON_GRID_ENERGY: ['gridSellTodayEnergy', float, 2],
        GRID_DAILY_ENERGY_PURCHASED: ['gridPurchasedTodayEnergy', float, 2],
        GRID_DAILY_ENERGY_USED: ['homeLoadTodayEnergy', float, 2],
        GRID_MONTHLY_ENERGY_PURCHASED: ['gridPurchasedMonthEnergy', float, 2],
        GRID_YEARLY_ENERGY_PURCHASED: ['gridPurchasedYearEnergy', float, 2],
        GRID_TOTAL_ON_GRID_ENERGY: ['gridSellTotalEnergy', float, 2],
        GRID_TOTAL_POWER: ['psum', float, 3],
        GRID_TOTAL_POWER_STR: ['psumStr', str, None],
        GRID_TOTAL_CONSUMPTION_POWER: ['familyLoadPower', float, 3],
        GRID_TOTAL_CONSUMPTION_POWER_STR: ['familyLoadPowerStr', str, None],
        GRID_TOTAL_ENERGY_USED: ['homeLoadTotalEnergy', float, 3],
        GRID_TOTAL_ENERGY_USED_STR: ['homeLoadTotalEnergyStr', str, None],
    },
    PLANT_DETAIL: {
        INVERTER_LAT: ['latitude', float, 7],
        INVERTER_LON: ['longitude', float, 7],
        INVERTER_ADDRESS: ['cityStr', str, None],
    },
}
class SoliscloudConfig(PortalConfig):
    """ Portal configuration data """
    def __init__(self,
            portal_domain: str,
            portal_username: str,
            portal_key_id: str,
            portal_secret: bytes,
            portal_plantid: str
    ) -> None:
        """Store Soliscloud credentials on top of the common portal config."""
        super().__init__(portal_domain, portal_username, portal_plantid)
        self._secret: bytes = portal_secret
        self._key_id: str = portal_key_id
    @property
    def key_id(self) -> str:
        """ API key ID used to sign requests."""
        return self._key_id
    @property
    def secret(self) -> bytes:
        """ API secret used to sign requests."""
        return self._secret
class SoliscloudAPI(BaseAPI):
"""Class with functions for reading data from the Soliscloud Portal."""
    def __init__(self, config: SoliscloudConfig) -> None:
        """Initialise the API wrapper; no I/O happens until login()."""
        self._config: SoliscloudConfig = config
        # aiohttp session handed in by login(), released by logout()
        self._session: ClientSession | None = None
        # None while logged out; set from the addUser response on login
        self._user_id: int | None = None
        # most recently fetched inverter attributes, keyed by sensor constant
        self._data: dict[str, str | int | float] = {}
        # {inverter serial: device id}, filled during login()
        self._inverter_list: dict[str, str] | None = None
@property
def config(self) -> SoliscloudConfig:
""" Config this for this API instance."""
return self._config
@property
def is_online(self) -> bool:
""" Returns if we are logged in."""
return self._user_id is not None
    async def login(self, session: ClientSession) -> bool:
        """See if we can fetch userId and build a list of inverters.

        Stores the given aiohttp session for later calls.  Returns True
        when the user id was obtained (is_online); on any failure the user
        id is cleared so the instance reads as offline.
        """
        self._session = session
        self._inverter_list = None
        # Building url & params
        canonicalized_resource = '/v1/api/addUser'
        params = {
            "userName": self.config.username,
            "userType":0
        }
        # Request user id
        result = await self._post_data_json(canonicalized_resource, params)
        if result[SUCCESS] is True:
            result_json = result[CONTENT]
            try:
                # 'data' carries the numeric user id on success.
                self._user_id = result_json['data']
                _LOGGER.info('Login Successful!')
                # Request inverter list
                self._inverter_list = await self.fetch_inverter_list(self.config.plantid)
            except KeyError:
                # Response did not have the expected shape; treat as offline.
                _LOGGER.error(
                    'Unable to communicate with %s, please verify configuration.',
                    self.config.domain)
                self._user_id = None
        else:
            self._user_id = None
        return self.is_online
async def logout(self) -> None:
"""Hand back session """
self._session = None
self._user_id = None
self._inverter_list = None
async def fetch_inverter_list(self, plant_id: str) -> dict[str, str]:
"""
Fetch return list of inverters { inverter serial : device_id }
"""
device_ids = None
params = {
'stationId': plant_id
}
result = await self._post_data_json('/v1/api/inveterList', params)
if result[SUCCESS] is True:
device_ids = {}
result_json: dict = result[CONTENT]
for record in result_json['data']['page']['records']:
serial = record.get('sn')
device_id = record.get('id')
device_ids[serial] = device_id
else:
self._user_id = None
return device_ids
    async def fetch_inverter_data(self, inverter_serial: str) -> GinlongData | None:
        """
        Fetch data for given inverter. Backend data is optimized for frontend.
        Collect available data from payload and store as GinlongData object.

        Returns None when not logged in, when the serial is unknown, or
        when the detail request fails.
        """
        _LOGGER.debug("Fetching data for serial: %s", inverter_serial)
        # Start from a clean slate for every fetch.
        self._data = {}
        if self.is_online:
            if self._inverter_list is not None and inverter_serial in self._inverter_list:
                device_id = self._inverter_list[inverter_serial]
                payload = await self._get_inverter_details(device_id, inverter_serial)
                if payload is not None:
                    #_LOGGER.debug("%s", payload)
                    # Copy known attributes, then normalize units/timestamps.
                    self._collect_inverter_data(payload)
                    self._post_process()
                    return GinlongData(self._data)
        return None
async def _get_inverter_details(self,
device_id: str,
device_serial: str
) -> dict[str, Any] | None:
"""
Update inverter details
"""
# Get inverter details
params = {
'id': device_id,
'sn': device_serial
}
result = await self._post_data_json(INVERTER_DETAIL, params)
jsondata = None
if result[SUCCESS] is True:
jsondata = result[CONTENT]
else:
_LOGGER.info('Unable to fetch details for device with ID: %s', device_id)
return jsondata
def _collect_inverter_data(self, payload: dict[str, Any]) -> None:
""" Fetch dynamic properties """
jsondata = payload['data']
attributes = INVERTER_DATA[INVERTER_DETAIL]
for dictkey in attributes:
key = attributes[dictkey][0]
type_ = attributes[dictkey][1]
precision = attributes[dictkey][2]
if key is not None:
value = self._get_value(jsondata, key, type_, precision)
if value is not None:
self._data[dictkey] = value
    def _post_process(self) -> None:
        """ Cleanup received data.

        Normalizes self._data in place: timestamp to seconds, power to W,
        energy to kWh, then drops unused phases and PV strings.
        NOTE(review): the *_STR lookups below assume those keys were present
        in the payload; a payload missing one would raise KeyError here --
        confirm the API always sends them.
        """
        if self._data:
            # Fix timestamps
            # dataTimestamp arrives in milliseconds; convert to seconds.
            self._data[INVERTER_TIMESTAMP_UPDATE] = \
                float(self._data[INVERTER_TIMESTAMP_UPDATE])/1000
            # Convert kW into W depending on unit returned from API.
            if self._data[GRID_TOTAL_POWER_STR] == "kW":
                self._data[GRID_TOTAL_POWER] = \
                    float(self._data[GRID_TOTAL_POWER])*1000
                self._data[GRID_TOTAL_POWER_STR] = "W"
            if self._data[BAT_POWER_STR] == "kW":
                self._data[BAT_POWER] = \
                    float(self._data[BAT_POWER])*1000
                self._data[BAT_POWER_STR] = "W"
            if self._data[GRID_TOTAL_CONSUMPTION_POWER_STR] == "kW":
                self._data[GRID_TOTAL_CONSUMPTION_POWER] = \
                    float(self._data[GRID_TOTAL_CONSUMPTION_POWER])*1000
                self._data[GRID_TOTAL_CONSUMPTION_POWER_STR] = "W"
            # Normalize MWh/GWh energy figures to kWh.
            if self._data[GRID_TOTAL_ENERGY_USED_STR] == "MWh":
                self._data[GRID_TOTAL_ENERGY_USED] = \
                    float(self._data[GRID_TOTAL_ENERGY_USED])*1000
                self._data[GRID_TOTAL_ENERGY_USED_STR] = "kWh"
            elif self._data[GRID_TOTAL_ENERGY_USED_STR] == "GWh":
                self._data[GRID_TOTAL_ENERGY_USED] = \
                    float(self._data[GRID_TOTAL_ENERGY_USED])*1000*1000
                self._data[GRID_TOTAL_ENERGY_USED_STR] = "kWh"
            if self._data[INVERTER_ACPOWER_STR] == "kW":
                self._data[INVERTER_ACPOWER] = \
                    float(self._data[INVERTER_ACPOWER])*1000
                self._data[INVERTER_ACPOWER_STR] = "W"
            if self._data[INVERTER_ENERGY_THIS_YEAR_STR] == "MWh":
                self._data[INVERTER_ENERGY_THIS_YEAR] = \
                    float(self._data[INVERTER_ENERGY_THIS_YEAR])*1000
                self._data[INVERTER_ENERGY_THIS_YEAR_STR] = "kWh"
            if self._data[INVERTER_ENERGY_TOTAL_LIFE_STR] == "MWh":
                self._data[INVERTER_ENERGY_TOTAL_LIFE] = \
                    float(self._data[INVERTER_ENERGY_TOTAL_LIFE])*1000
                self._data[INVERTER_ENERGY_TOTAL_LIFE_STR] = "kWh"
            elif self._data[INVERTER_ENERGY_TOTAL_LIFE_STR] == "GWh":
                self._data[INVERTER_ENERGY_TOTAL_LIFE] = \
                    float(self._data[INVERTER_ENERGY_TOTAL_LIFE])*1000*1000
                self._data[INVERTER_ENERGY_TOTAL_LIFE_STR] = "kWh"
            # Just temporary till SolisCloud is fixed
            self._data[GRID_DAILY_ON_GRID_ENERGY] = \
                float(self._data[GRID_DAILY_ON_GRID_ENERGY])*10
            # Unused phases are still in JSON payload as 0.0, remove them
            # FIXME: use acOutputType
            self._purge_if_unused(0.0, PHASE1_CURRENT, PHASE1_VOLTAGE)
            self._purge_if_unused(0.0, PHASE2_CURRENT, PHASE2_VOLTAGE)
            self._purge_if_unused(0.0, PHASE3_CURRENT, PHASE3_VOLTAGE)
            # Unused PV chains are still in JSON payload as 0, remove them
            # FIXME: use dcInputtype (NB num + 1) Unfortunately so are chains that are
            # just making 0 voltage. So this is too simplistic.
            # mypy trips over self_data[STRING_COUNT] as it could be of type str, int or float
            # needs to be fixed at some point in time, but this works.
            for i, stringlist in enumerate(STRING_LISTS):
                if i > int(self._data[STRING_COUNT]):
                    self._purge_if_unused(0, *stringlist)
def _purge_if_unused(self, value: Any, *elements: str) -> None:
for element in elements:
try:
if self._data[element] != value:
return
except KeyError:
return
for element in elements:
self._data.pop(element)
def _get_value(self,
data: dict[str, Any], key: str, type_: type, precision: int = 2
) -> str | int | float | None:
""" Retrieve 'key' from 'data' as type 'type_' with precision 'precision' """
result = None
data_raw = data.get(key)
if data_raw is not None:
result = type_(data_raw)
# Round to specified precision
if type_ is float:
result = round(result, precision)
return result
    async def _get_data(self,
            url: str,
            params: dict[str, Any]
    ) -> dict[str, Any]:
        """ Http-get data from specified url.

        Returns a dict with SUCCESS (bool), MESSAGE, STATUS_CODE and, when
        a response was received, CONTENT (the parsed JSON body).
        """
        result: dict[str, Any] = {SUCCESS: False, MESSAGE: None, STATUS_CODE: None}
        resp = None
        # Without a session (not logged in) nothing can be fetched.
        if self._session is None:
            return result
        try:
            # NOTE(review): recent async_timeout releases require
            # 'async with'; plain 'with' only works on older versions --
            # confirm the pinned async_timeout version.
            with async_timeout.timeout(10):
                resp = await self._session.get(url, params=params)
                result[STATUS_CODE] = resp.status
                result[CONTENT] = await resp.json()
                if resp.status == HTTPStatus.OK:
                    result[SUCCESS] = True
                    result[MESSAGE] = "OK"
                else:
                    result[MESSAGE] = "Got http statuscode: %d" % (resp.status)
            return result
        except (asyncio.TimeoutError, ClientError) as err:
            result[MESSAGE] = "Exception: %s" % err.__class__
            _LOGGER.debug("Error: %s", result[MESSAGE])
            return result
        finally:
            # Always hand the connection back to the pool.
            if resp is not None:
                await resp.release()
def _prepare_header(self, body: dict[str, str], canonicalized_resource: str) -> dict[str, str]:
now = datetime.now(timezone.utc)
date = now.strftime("%a, %d | |
and following: (element of x (row) abscissa), followed
by table data.
From line/row 3 onwards the first element is the x abscissa value
followed by the row of data, one point for each y abscissa value.
The file format can depicted as follows: ::
x-name y-name ordinates-name
0 y1 y2 y3 y4
x1 v11 v12 v13 v14
x2 v21 v22 v23 v24
x3 v31 v32 v33 v34
x4 v41 v42 v43 v44
x5 v51 v52 v53 v54
x6 v61 v62 v63 v64
This function reads the file and returns the individual data items.
Args:
| fname (string): input path and filename
Returns:
| xVec ((np.array[N])): x abscissae
| yVec ((np.array[M])): y abscissae
| data ((np.array[N,M])): data corresponding the x,y
| xlabel (string): x abscissa label
| ylabel (string): y abscissa label
| title (string): dataset title
Raises:
| No exception is raised.
"""
import numpy as np
with open(filename,'r') as f:
lines = f.readlines()
xlabel, ylabel, title = lines[0].split()
aArray = np.loadtxt(filename, skiprows=1, dtype=float)
xVec = aArray[1:, 0]
yVec = aArray[0, 1:]
data = aArray[1:, 1:]
return(xVec, yVec, data, xlabel, ylabel, title)
######################################################################################
def open_HDF(filename, mode='r'):
    """Open and return an HDF5 file with the given filename.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    The default mode is read-only; it can be overridden by any of these:
    'r', 'r+', 'w', 'w-'/'x', 'a'
    https://h5py.readthedocs.io/en/stable/quick.html
    https://h5py.readthedocs.io/en/stable/high/file.html#opening-creating-files
    Args:
        | filename (string): name of the file to be opened
        | mode (string): h5py file open mode
    Returns:
        | HDF5 file.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    return h5py.File(filename, mode)
######################################################################################
def erase_create_HDF(filename):
    """Create and return a new HDF5 file with the given filename, erase the file if existing.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    open for writing, truncate if exists
    https://h5py.readthedocs.io/en/stable/high/file.html#opening-creating-files
    Args:
        | filename (string): name of the file to be created
    Returns:
        | HDF5 file.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    # Remove any existing file first, then create a fresh one in 'w' mode.
    if os.path.isfile(filename):
        os.remove(filename)
    return h5py.File(filename, 'w')
######################################################################################
def get_HDF_DS(hdf5File, path):
    """Check to see if a path is present in an HDF5 file and then return the dataset.
    Args:
        | hdf5File (H5py file): HDF5 file to be read
        | path (string): path to the dataset
    Returns:
        | HDF5 contents along the path, or None.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    return hdf5File[path] if path in hdf5File else None
######################################################################################
def print_HDF5_text(vartext):
    """Print one line of text; intended as a visitor callback on HDF5 files.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    Args:
        | vartext (string): string to be printed
    Returns:
        | None.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    print('{}'.format(vartext))
######################################################################################
def print_HDF5_dataset_value(var, obj):
    """Print path and name of a dataset; visitor callback for HDF5 visititems.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    Args:
        | var (string): path to a dataset
        | obj (h5py dataset): dataset to be printed
    Returns:
        | None.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    item = obj.file[var]
    # Only datasets are printed; groups are silently skipped.
    if type(item) is h5py._hl.dataset.Dataset:
        print(var, item.name)
######################################################################################
def get_HDF_branches(hdf5File):
    """Print all the branches (groups and datasets) in the file.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    Args:
        | hdf5File (H5py file): the file whose branches are printed
    Returns:
        | None (visit() returns None when the visitor always returns None).
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    # The previous implementation passed this function itself as the visit()
    # callback; visit() calls the callback with each member *name* (a str),
    # and a str has no .visit attribute, so it crashed on the first entry.
    # Print each visited name instead.
    def _print_name(name):
        print(name)
    return hdf5File.visit(_print_name)
######################################################################################
def plotHDF5Bitmaps(hfd5f, prefix, pformat='png', lstimgs=None, debug=False):
    """Plot arrays in the HFD5 as scaled bitmap images.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    Retain zero in the array as black in the image, only scale the max value to 255
    Args:
        | hfd5f (H5py file): the file to be opened
        | prefix (string): prefix to be prepended to filename
        | pformat (string): type of file to be created png/jpeg
        | lstimgs ([string]): list of paths to images in the HFD5 file
        | debug (bool): if True, print each dataset's shape
    Returns:
        | Nothing.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    # Tolerate the default lstimgs=None instead of crashing on iteration.
    for lstimg in lstimgs or []:
        arr = hfd5f[lstimg][...]
        if debug:
            print('data set {} has shape {} '.format(lstimg, arr.shape))
        # Scalar datasets cannot be rendered as a bitmap.
        if arr.shape != ():
            if np.max(arr) != 0.:
                arr = 255 * arr / np.max(arr)
            imsave('{}-{}.{}'.format(prefix, lstimg.replace('/', '-'), pformat), arr.astype(np.uint8))
######################################################################################
def plotHDF5Images(hfd5f, prefix, colormap=mcm.jet, cbarshow=True, lstimgs=None, logscale=False, debug=False):
    """Plot images contained in hfd5f with colour map to show magnitude.
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
    Args:
        | hfd5f (H5py file): the file to be opened
        | prefix (string): prefix to be prepended to filename
        | colormap (Matplotlib colour map): colour map to be used in plot
        | cbarshow (boolean): indicate if colour bar must be shown
        | lstimgs ([string]): list of paths to images in the HFD5 file
        | logscale (boolean): True if display must be on log scale
        | debug (bool): if True, print each dataset's shape
    Returns:
        | Nothing.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    from . import ryplot
    # Tolerate the default lstimgs=None instead of crashing on iteration.
    for lstimg in lstimgs or []:
        arr = hfd5f['{}'.format(lstimg)][...]
        if debug:
            print('data set {} has shape {} '.format(lstimg,arr.shape))
        # Scalar datasets cannot be rendered as an image.
        if arr.shape != ():
            if logscale:
                filename = '{}-plot-{}-log.png'.format(prefix,lstimg.replace('/','-'))
                with ryplot.savePlot(1,1,1,figsize=(8,8), saveName=[filename]) as p:
                    p.showImage(1, np.log10(arr), ptitle=lstimg, cmap=colormap, cbarshow=cbarshow)
            else:
                filename = '{}-plot-{}.png'.format(prefix,lstimg.replace('/','-'))
                with ryplot.savePlot(1,1,1,figsize=(8,8), saveName=[filename]) as p:
                    p.showImage(1, arr, ptitle=lstimg, cmap=colormap, cbarshow=cbarshow)
######################################################################################
def plotHDF5Histograms(hfd5f, prefix, format='png', lstimgs=None, bins=50):
    """Plot histograms of images contained in hfd5f
    See https://github.com/NelisW/pyradi/blob/master/pyradi/hdf5-as-data-format.md
    for more information on using HDF5 as a data structure.
    Args:
        | hfd5f (H5py file): the file to be opened
        | prefix (string): prefix to be prepended to filename
        | format (string): type of file to be created png/jpeg
          (parameter name kept for backward compatibility even though it
          shadows the builtin)
        | lstimgs ([string]): list of paths to images in the HFD5 file
        | bins ([int]): Number of bins to be used in histogram
    Returns:
        | Nothing.
    Raises:
        | No exception is raised.
    Author: <NAME>
    """
    from . import ryplot
    # Tolerate the default lstimgs=None instead of crashing on iteration.
    for lstimg in lstimgs or []:
        arr = hfd5f['{}'.format(lstimg)][...]
        # 'bin_edges' instead of the original 'bin', which shadowed the builtin.
        counts, bin_edges = np.histogram(arr, bins=bins)
        filename = '{}-hist-plot-{}.{}'.format(prefix, lstimg.replace('/','-'), format)
        # Plot counts against the bin midpoints.
        midpoints = (bin_edges[1:] + bin_edges[:-1]) / 2
        with ryplot.savePlot(1,1,1,figsize=(8,4), saveName=[filename]) as p:
            p.plot(1, midpoints, counts, '{}, {} bins'.format(lstimg, bins), 'Magnitude','Counts / bin',maxNX=5)
################################################################
def mergeDFS(df1, df2,leftPre=None,rightPre=None,bounds_error=False,
mergeOn=None):
"""Merge two pandas data frames on common column return merged df.
by default the merging takes place on columns named Time or time,
but the merge column name can be specified in mergeOn
Args:
| df1 (dataframe): first dataframe to be merged
| df2 (dataframe): first dataframe to be merged
| leftPre (string): to be prepended to df1 columns names, except time
| rightPre (string): to be prepended to df2 columns names, except time
| bounds_error (boolean): pass through to interpolation function
| mergeOn (string): if not merging on time use this to merge on
Returns:
| (pd.DataFrame): A Pandas data frame with the merged data
Raises:
| No exception is raised.
"""
if mergeOn is not None:
if 'time' in df1.columns or 'time' in df2.columns or \
'Time' in df1.columns or 'Time' in df2.columns:
print('The "time" columns may not already be present in a dataframe')
return pd.DataFrame()
# rename merge column to time, later rename back
df1 = df1.rename(columns={mergeOn: 'time'})
df2 = df2.rename(columns={mergeOn: 'time'})
# we want time to be lower case
df1 = df1.rename(columns={'Time': 'time'})
df2 = df2.rename(columns={'Time': 'time'})
if leftPre is not None:
ncols = []
cols = df1.columns
for col in cols:
# if not 'Time' in col and not 'time' in col:
if not 'time' in col:
ncol = leftPre + '_' + col
else:
ncol = col
ncols.append(ncol)
df1.columns = ncols
if rightPre is not None:
ncols = []
cols = df2.columns
for col in cols:
# if not 'Time' in col and not 'time' in col:
if not 'time' in col:
ncol = rightPre + '_' + col
else:
ncol = col
ncols.append(ncol)
df2.columns = ncols
# default setting, change down below
merged = False
# test single row entries
if df1.shape[0]==1 and df2.shape[0]==1:
if df1['time'].iloc[0] | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import errno
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
import uuid
import zipfile

from slugify import slugify

import version
from modules.config import ConverterConfig
from modules.epub import EpubProc
from modules.fb2html import Fb2XHTML
from modules.mobi_pagemap import PageMapProcessor
from modules.mobi_split import mobi_split, mobi_read
from modules.sendtokindle import SendToKindle
from modules.utils import get_executable_name, get_executable_path, format_pattern, clean_file_name
def create_epub(rootdir, epubname):
    """Package the directory *rootdir* as an EPUB archive at *epubname*.

    The ``mimetype`` entry is written first and uncompressed (ZIP_STORED),
    as the EPUB container format requires; all other entries are deflated.
    The temporary ``_unzipped.fb2`` source file is excluded from the archive.
    """
    # The context manager guarantees the archive is closed (and flushed)
    # even if one of the writes raises; the original leaked the handle.
    with zipfile.ZipFile(epubname, "w") as epub:
        epub.write(os.path.join(rootdir, 'mimetype'), 'mimetype', zipfile.ZIP_STORED)
        for root, _, files in os.walk(rootdir):
            relpath = os.path.relpath(root, rootdir)
            if relpath != '.':
                # Record the directory entry itself so the tree layout survives.
                epub.write(root, relpath)
            for filename in files:
                if filename != 'mimetype' and filename != '_unzipped.fb2':
                    epub.write(os.path.join(root, filename), os.path.join(relpath, filename), zipfile.ZIP_DEFLATED)
def get_mobi_filename(filename, translit=False):
    """Derive the output ``.mobi`` file name from an input file name.

    Strips the extension — including the double extension of names such
    as ``book.fb2.zip`` — and optionally transliterates the stem to
    ASCII via slugify.
    """
    stem = os.path.splitext(filename)[0]
    # A "book.fb2.zip" input leaves "book.fb2" after one splitext; peel
    # the inner extension off as well.
    if stem.lower().endswith('.fb2'):
        stem = os.path.splitext(stem)[0]
    if translit:
        stem = slugify(stem)
    return '{0}.mobi'.format(stem)
def unzip(filename, tempdir):
    """Extract the first member of a zip archive to ``tempdir/_unzipped.fb2``.

    Returns the path of the extracted file, or None when the first archive
    entry has no file-name component (i.e. it is a directory entry).
    """
    unzipped_file = None
    # Context managers close both the archive and the output file even if
    # reading or writing raises; the original leaked the archive handle.
    with zipfile.ZipFile(filename) as zfile:
        zname = zfile.namelist()[0]
        _, zfilename = os.path.split(zname)
        if zfilename:
            unzipped_file = os.path.join(tempdir, '_unzipped.fb2')
            with open(unzipped_file, 'wb') as f:
                f.write(zfile.read(zname))
    return unzipped_file
def unzip_epub(filename, tempdir):
    """Unpack an epub archive into *tempdir* and return the path of its OPF.

    The OPF "package document" is the entry point of an epub. Returns None
    when the archive contains no ``.opf`` entry.
    """
    unzipped_file = None
    # "with" ensures the archive handle is released even when extraction
    # fails; the original leaked it on exception.
    with zipfile.ZipFile(filename) as zfile:
        zname = next((x for x in zfile.namelist() if x.lower().endswith('.opf')), None)
        if zname:
            zfile.extractall(tempdir)
            unzipped_file = os.path.normpath(os.path.join(tempdir, zname))
    return unzipped_file
def rm_tmp_files(dest, deleteroot=True):
    """Recursively delete the contents of the directory *dest*.

    When *deleteroot* is True (the default) the directory itself is
    removed as well; otherwise it is left in place, emptied.
    """
    if deleteroot:
        # shutil.rmtree replaces the manual bottom-up os.walk loop.
        shutil.rmtree(dest)
    else:
        for entry in os.listdir(dest):
            path = os.path.join(dest, entry)
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
def process_file(config, infile, outfile=None):
    """Convert a single fb2/fb2.zip/zip/epub file into mobi, azw3 or epub.

    Args:
        config: ConverterConfig holding the active profile, all conversion
            options and the logger.
        infile: path to the source book file.
        outfile: optional explicit output path; when omitted the name is
            derived from the input file name and the configuration.

    Returns:
        Path of the produced book file, or None when conversion failed.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # portable replacement for wall-clock interval timing.
    start_time = time.perf_counter()
    temp_dir = tempfile.mkdtemp()
    if not os.path.exists(infile):
        config.log.critical('File {0} not found'.format(infile))
        return
    fbname = os.path.basename(infile)
    config.log.info('Converting "{0}" in "{1}"...'.format(fbname, temp_dir))
    config.log.info('Using profile "{0}".'.format(config.current_profile['name']))
    # Validate the input parameters
    if infile:
        if not infile.lower().endswith(('.fb2', '.fb2.zip', '.zip', '.epub')):
            config.log.critical('"{0}" not *.fb2, *.fb2.zip, *.zip or *.epub'.format(infile))
            return
        if not config.current_profile['css'] and not infile.lower().endswith(('.epub')):
            config.log.warning('Profile does not have link to css file.')
        if 'xslt' in config.current_profile and not os.path.exists(config.current_profile['xslt']):
            config.log.critical('Transformation file {0} not found'.format(config.current_profile['xslt']))
            return
    if config.kindle_compression_level < 0 or config.kindle_compression_level > 2:
        config.log.warning('Parameter kindleCompressionLevel should be between 0 and 2, using default value (1).')
        config.kindle_compression_level = 1
    # Derive the output file name when it was not given explicitly
    if not outfile:
        outdir, outputfile = os.path.split(infile)
        outputfile = get_mobi_filename(outputfile, config.transliterate)
        if config.output_dir:
            if not os.path.exists(config.output_dir):
                os.makedirs(config.output_dir)
            if config.input_dir and config.save_structure:
                rel_path = os.path.join(config.output_dir, os.path.dirname(os.path.relpath(infile, config.input_dir)))
                if not os.path.exists(rel_path):
                    os.makedirs(rel_path)
                outputfile = os.path.join(rel_path, outputfile)
            else:
                outputfile = os.path.join(config.output_dir, outputfile)
        else:
            outputfile = os.path.join(outdir, outputfile)
    else:
        _output_file, _output_format = os.path.splitext(outfile)
        if _output_format not in ('.mobi', '.azw3', '.epub'):
            config.log.critical('Output format "{0}" is not supported'.format(_output_format))
            return
        else:
            _output_format = _output_format.lower()[1:]
            if not config.mhl:
                config.output_format = _output_format
            outputfile = '{0}.{1}'.format(_output_file, config.output_format)
    if config.output_format.lower() == 'epub':
        # epub output is always split into a file per chapter
        config.current_profile['chapterOnNewPage'] = True
    input_ext = os.path.splitext(infile)[1].lower()
    if input_ext == '.epub':
        config.log.info('Unpacking epub...')
        tmp_infile = infile
        try:
            infile = unzip_epub(infile, temp_dir)
        except:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return
        if not infile:
            config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
            return
        # Let's see what we could do
        config.log.info('Processing epub...')
        epubparser = EpubProc(infile, config)
        epubparser.process()
        document_id = epubparser.book_uuid
    else:
        if input_ext == '.zip':
            config.log.info('Unpacking...')
            tmp_infile = infile
            try:
                infile = unzip(infile, temp_dir)
            except:
                config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
                return
            if not infile:
                config.log.critical('Error unpacking file "{0}".'.format(tmp_infile))
                return
        # Convert the fb2 source to html
        config.log.info('Converting fb2 to html...')
        interrupted = False
        try:
            fb2parser = Fb2XHTML(infile, temp_dir, config)
            fb2parser.generate()
            document_id = fb2parser.book_uuid
            infile = os.path.join(temp_dir, 'OEBPS', 'content.opf')
            if not outfile and config.output_pattern:
                # pylint: disable=C0330
                # yapf: disable
                fname = format_pattern(config.output_pattern,
                    [
                        ('#title', '' if not fb2parser.book_title else fb2parser.book_title.strip()),
                        ('#series', '' if not fb2parser.book_series else fb2parser.book_series.strip()),
                        ('#abbrseries', ''.join(word[0] for word in fb2parser.book_series.split()).lower() if fb2parser.book_series else ''),
                        ('#number', '' if not fb2parser.book_series_num else fb2parser.book_series_num.strip()),
                        ('#padnumber', '' if not fb2parser.book_series_num else fb2parser.book_series_num.strip().zfill(fb2parser.seriespositions)),
                        ('#authors', fb2parser.get_book_authors()),
                        ('#author', fb2parser.get_book_authors(True)),
                        ('#bookid', document_id),
                    ])
                # yapf: enable
                head, tail = os.path.splitext(outputfile)
                fname = clean_file_name(fname)
                config.log.info('Re-setting output file name to "{0}"...'.format(fname))
                outputfile = os.path.join(os.path.dirname(head), fname + tail)
        except:
            interrupted = True
            config.log.critical('Error while converting file "{0}"'.format(infile))
            config.log.debug('Getting details', exc_info=True)
        finally:
            if config.debug:
                # to avoid problems with file name length we'll base debug directory name on input file, but will place it in output file directory
                # part of the name will be a timestamp - so the name will be unique and easily sorted
                debug_dir = '{0}_debug_{1}'.format(os.path.join(os.path.dirname(outputfile), os.path.splitext(fbname)[0]), time.strftime("%Y%m%d%H%M%S"))
                # for debugging
                config.log.info('Copying intermediate files to "{0}"...'.format(debug_dir))
                if os.path.exists(debug_dir):
                    rm_tmp_files(debug_dir)
                shutil.copytree(temp_dir, debug_dir)
                if not input_ext == '.epub':
                    # Store fb2 after xslt transformation for debugging
                    fb2parser.write_debug(debug_dir)
            if interrupted:
                # pylint: disable=W0150
                return
    config.log.info('Processing took {0} sec.'.format(round(time.perf_counter() - start_time, 2)))
    if config.output_format.lower() in ('mobi', 'azw3'):
        # Run kindlegen to build the binary book
        application_path = get_executable_path()
        kindlegen_cmd = 'kindlegen.exe' if version.WINDOWS else 'kindlegen'
        if os.path.exists(os.path.join(application_path, kindlegen_cmd)):
            kindlegen_cmd = os.path.join(application_path, kindlegen_cmd)
        interrupted = False
        try:
            config.log.info('Running kindlegen...')
            kindlegen_cmd_pars = '-c{0}'.format(config.kindle_compression_level)
            startupinfo = None
            if os.name == 'nt':
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            with subprocess.Popen([kindlegen_cmd, infile, kindlegen_cmd_pars, '-locale', 'en'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, startupinfo=startupinfo) as result:
                config.log.debug(str(result.stdout.read(), 'utf-8', errors='replace'))
        except OSError as e:
            interrupted = True
            # os.errno was removed from modern Python; the errno module is
            # the documented home of ENOENT.
            if e.errno == errno.ENOENT:
                config.log.critical('{0} not found'.format(kindlegen_cmd))
            else:
                if version.WINDOWS:
                    config.log.critical(e.winerror)
                config.log.critical(e.strerror)
            config.log.debug('Getting details', exc_info=True, stack_info=True)
        finally:
            if interrupted:
                rm_tmp_files(temp_dir)
                sys.exit(-1)
    elif config.output_format.lower() == 'epub':
        # Assemble the epub container
        config.log.info('Creating epub...')
        outputfile = os.path.splitext(outputfile)[0] + '.epub'
        create_epub(temp_dir, outputfile)
    ext = config.output_format.lower()
    if ext in ('mobi', 'azw3'):
        # Copy the mobi (azw3) result from the temp to the output directory
        result_book = infile.replace('.opf', '.mobi')
        if not os.path.isfile(result_book):
            config.log.critical('kindlegen error, conversion interrupted.')
            rm_tmp_files(temp_dir)
            return
        else:
            try:
                remove_personal = config.current_profile['kindleRemovePersonalLabel']
                # The original used "ext in 'mobi'" — a substring test that
                # only worked by accident; compare for equality instead.
                if ext == 'mobi' and config.noMOBIoptimization:
                    config.log.info('Copying resulting file...')
                    shutil.copyfile(result_book, outputfile)
                else:
                    config.log.info('Optimizing resulting file...')
                    splitter = mobi_split(result_book, document_id, remove_personal, ext)
                    # "with" closes the output handle (the original leaked it)
                    with open(os.path.splitext(outputfile)[0] + '.' + ext, 'wb') as book_file:
                        book_file.write(splitter.getResult() if ext == 'mobi' else splitter.getResult8())
            except:
                config.log.critical('Error optimizing file, conversion interrupted.')
                config.log.debug('Getting details', exc_info=True, stack_info=True)
                rm_tmp_files(temp_dir)
                return
        if config.apnx:
            try:
                base, _ = os.path.splitext(outputfile)
                reader = mobi_read(base + '.' + ext)
                pagedata = reader.getPageData()
                if pagedata:
                    config.log.info('Generating page index (APNX)...')
                    pages = PageMapProcessor(pagedata, config.log)
                    asin = reader.getCdeContentKey()
                    if not asin:
                        asin = reader.getASIN()
                    apnx = pages.generateAPNX({
                        'contentGuid': str(uuid.uuid4()).replace('-', '')[:8],
                        'asin': asin,
                        'cdeType': reader.getCdeType(),
                        'format': 'MOBI_8' if ext == 'azw3' else 'MOBI_7',
                        'pageMap': pages.getPageMap(),
                        'acr': reader.getACR()
                    })
                    if config.apnx == 'eink':
                        basename = os.path.basename(base)
                        sdr = base + '.sdr'
                        if not os.path.exists(sdr):
                            os.makedirs(sdr)
                        apnxfile = os.path.join(sdr, basename + '.apnx')
                    else:
                        apnxfile = base + '.apnx'
                    with open(apnxfile, 'wb') as apnx_out:
                        apnx_out.write(apnx)
                else:
                    config.log.warning('No information to generate page index')
            except:
                config.log.warning('Unable to generate page index (APNX)')
                config.log.debug('Getting details', exc_info=True, stack_info=True)
    config.log.info('Book conversion completed in {0} sec.\n'.format(round(time.perf_counter() - start_time, 2)))
    if config.send_to_kindle['send']:
        if config.output_format.lower() != 'mobi':
            config.log.warning('Kindle Personal Documents Service only accepts personal mobi files')
        else:
            config.log.info('Sending book...')
            try:
                kindle = SendToKindle()
                kindle.smtp_server = config.send_to_kindle['smtpServer']
                kindle.smtp_port = config.send_to_kindle['smtpPort']
                kindle.smtp_login = config.send_to_kindle['smtpLogin']
                kindle.smtp_password = config.send_to_kindle['smtpPassword']
                kindle.user_email = config.send_to_kindle['fromUserEmail']
                kindle.kindle_email = config.send_to_kindle['toKindleEmail']
                kindle.convert = False
                kindle.send_mail([outputfile])
                config.log.info('Book has been sent to "{0}"'.format(config.send_to_kindle['toKindleEmail']))
                if config.send_to_kindle['deleteSendedBook']:
                    try:
                        os.remove(outputfile)
                    except:
                        config.log.error('Unable to remove file "{0}".'.format(outputfile))
                        return
            except KeyboardInterrupt:
                print('User interrupt. Exiting...')
                rm_tmp_files(temp_dir)
                sys.exit(-1)
            except:
                config.log.error('Error sending file')
                config.log.debug('Getting details', exc_info=True, stack_info=True)
    # Clean up the temporary files
    rm_tmp_files(temp_dir)
    return os.path.splitext(outputfile)[0] + '.' + ext
def process_folder(config, inputdir, outputdir=None):
    """Convert every supported book file found under *inputdir*.

    Only files in the top-level directory are converted unless
    ``config.recursive`` is set, in which case the whole tree is walked.
    Source files are optionally deleted after conversion.
    """
    if outputdir:
        if not os.path.exists(outputdir):
            os.makedirs(outputdir)
    if not os.path.isdir(inputdir):
        config.log.critical('Unable to find directory "{0}"'.format(inputdir))
        sys.exit(-1)
    supported_exts = ('.fb2', '.fb2.zip', '.zip', '.epub')
    for root, _, names in os.walk(inputdir):
        for name in names:
            try:
                if not name.lower().endswith(supported_exts):
                    continue
                inputfile = os.path.join(root, name)
                # Skip files in subdirectories unless recursion is enabled.
                if config.recursive or inputdir == root:
                    process_file(config, inputfile, None)
                    if config.delete_source_file:
                        try:
                            os.remove(inputfile)
                        except:
                            config.log.error('Unable to remove file "{0}"'.format(inputfile))
                            continue
            except KeyboardInterrupt:
                print('User interrupt. Exiting...')
                sys.exit(-1)
            except IOError as e:
                config.log.error('(I/O error {0}) {1} - {2}'.format(e.errno, e.strerror, e.filename))
            except:
                config.log.error('Error processing folder')
                config.log.debug('Getting details', exc_info=True, stack_info=True)
def get_log_level(log_level):
    """Map a log level name (case-insensitive) to a logging constant.

    Unknown names fall back to ``logging.INFO``.
    """
    level_map = {
        'info': logging.INFO,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
        'debug': logging.DEBUG,
    }
    return level_map.get(log_level.lower(), logging.INFO)
def process(myargs):
infile = myargs.infile
outfile = myargs.outfile
application_path = get_executable_path()
config_file_name = "{0}.config".format(get_executable_name())
if myargs.config_file:
if os.path.exists(myargs.config_file):
# full path is given
config_file = myargs.config_file
elif os.path.exists(os.path.join(application_path, myargs.config_file)):
# found relative to program directory
config_file = os.path.join(application_path, myargs.config_file)
else:
print('Unable to locate configuration file "{0}"'.format(myargs.config_file))
sys.exit(-1)
else:
config_file = os.path.join(os.path.expanduser('~'), 'fb2mobi' if version.WINDOWS else '.fb2mobi', config_file_name)
if not os.path.exists(config_file):
# last resort - see if we have default config in program directory
if os.path.exists(os.path.join(application_path, config_file_name)):
config_file = os.path.join(application_path, config_file_name)
# if configuration does not exist - it will be created | |
<gh_stars>100-1000
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db.models import Q
from django.utils import six
from djblets.webapi.errors import DOES_NOT_EXIST, WebAPIError
from djblets.webapi.fields import (BooleanFieldType,
ChoiceFieldType,
DateTimeFieldType,
DictFieldType,
IntFieldType,
ResourceFieldType,
StringFieldType)
from reviewboard.reviews.models import BaseComment
from reviewboard.webapi.base import ImportExtraDataError, WebAPIResource
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class BaseCommentResource(MarkdownFieldsMixin, WebAPIResource):
"""Base class for comment resources.
Provides common fields and functionality for all comment resources.
"""
added_in = '1.6'
fields = {
'id': {
'type': IntFieldType,
'description': 'The numeric ID of the comment.',
},
'extra_data': {
'type': DictFieldType,
'description': 'Extra data as part of the comment. This depends '
'on what is being commented on, and may be '
'used in conjunction with an extension.',
'added_in': '2.0',
},
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether or not a comment opens an issue.',
},
'issue_status': {
'type': ChoiceFieldType,
'choices': tuple(six.iterkeys(BaseComment.ISSUE_STRING_TO_STATUS)),
'description': 'The status of an issue.',
},
'public': {
'type': BooleanFieldType,
'description': 'Whether or not the comment is part of a public '
'review.',
'added_in': '2.0',
},
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The mode for the comment text field.',
'added_in': '2.0',
},
'timestamp': {
'type': DateTimeFieldType,
'description': 'The date and time that the comment was made.',
'added_in': '2.0',
},
'user': {
'type': ResourceFieldType,
'resource': 'reviewboard.webapi.resources.user.UserResource',
'description': 'The user who made the comment.',
'added_in': '2.0',
},
}
# Common field definitions for create/update requests
_COMMON_REQUIRED_CREATE_FIELDS = {
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
}
_COMMON_OPTIONAL_CREATE_FIELDS = {
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The content type for the comment text field. '
'The default is ``plain``.',
'added_in': '2.0',
},
}
_COMMON_OPTIONAL_UPDATE_FIELDS = {
'force_text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The text type, if any, to force for returned '
'text fields. The contents will be converted '
'to the requested type in the payload, but '
'will not be saved as that type.',
'added_in': '2.0.9',
},
'text': {
'type': StringFieldType,
'description': 'The comment text.',
'supports_text_types': True,
'added_in': '2.0',
},
'text_type': {
'type': ChoiceFieldType,
'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
'description': 'The new content type for the comment text field. '
'The default is to leave the type unchanged.',
'added_in': '2.0',
},
}
# Field definitions for top-level comment create/update requests
REQUIRED_CREATE_FIELDS = _COMMON_REQUIRED_CREATE_FIELDS
OPTIONAL_CREATE_FIELDS = dict({
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether the comment opens an issue.',
'added_in': '2.0',
},
}, **_COMMON_OPTIONAL_CREATE_FIELDS)
OPTIONAL_UPDATE_FIELDS = dict({
'issue_opened': {
'type': BooleanFieldType,
'description': 'Whether or not the comment opens an issue.',
'added_in': '2.0',
},
'issue_status': {
'type': ChoiceFieldType,
'choices': tuple(six.iterkeys(BaseComment.ISSUE_STRING_TO_STATUS)),
'description': 'The status of an open issue.',
'added_in': '2.0',
},
}, **_COMMON_OPTIONAL_UPDATE_FIELDS)
# Field definitions for comment reply create/update requests
REPLY_REQUIRED_CREATE_FIELDS = dict({
'reply_to_id': {
'type': IntFieldType,
'description': 'The ID of the comment being replied to.',
},
}, **_COMMON_REQUIRED_CREATE_FIELDS)
REPLY_OPTIONAL_CREATE_FIELDS = _COMMON_OPTIONAL_CREATE_FIELDS
REPLY_OPTIONAL_UPDATE_FIELDS = _COMMON_OPTIONAL_UPDATE_FIELDS
def serialize_issue_status_field(self, obj, **kwargs):
return BaseComment.issue_status_to_string(obj.issue_status)
def has_access_permissions(self, request, obj, *args, **kwargs):
return obj.is_accessible_by(request.user)
def has_modify_permissions(self, request, obj, *args, **kwargs):
return obj.is_mutable_by(request.user)
def has_delete_permissions(self, request, obj, *args, **kwargs):
return obj.is_mutable_by(request.user)
def create_comment(self,
review,
fields,
text,
comments_m2m,
issue_opened=False,
text_type=MarkdownFieldsMixin.TEXT_TYPE_PLAIN,
extra_fields={},
save=True,
**kwargs):
"""Create a comment based on the requested data.
This will construct a comment of the type represented by the resource,
setting the issue states, text, extra_data, and any additional fields
provided by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review owning the comment.
fields (list of unicode):
The model fields that can be set through the API.
text (unicode):
The comment text.
comments_m2m (django.db.models.ManyToManyField):
The review's comments relation, where the new comment will
be added.
issue_opened (bool, optional):
Whether this comment opens an issue.
text_type (unicode, optional):
The text type for the comment. This defaults to plain text.
extra_fields (dict, optional):
Extra fields from the request not otherwise handled by the
API resource. Any ``extra_data`` modifications from this will
be applied to the comment.
save (bool, optional):
Whether or not to save the field and update ``comments_m2m``.
If ``False``, the caller is responsible for performing the
save.
**kwargs (dict):
Keyword arguments representing additional fields handled by
the API resource. Any that are also listed in ``fields`` will
be set on the model.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
comment_kwargs = {
'issue_opened': bool(issue_opened),
'rich_text': text_type == self.TEXT_TYPE_MARKDOWN,
'text': text.strip(),
}
for field in fields:
comment_kwargs[field] = kwargs.get(field)
new_comment = self.model(**comment_kwargs)
try:
self.import_extra_data(new_comment, new_comment.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
if issue_opened:
new_comment.issue_status = BaseComment.OPEN
else:
new_comment.issue_status = None
if save:
new_comment.save()
comments_m2m.add(new_comment)
return 201, {
self.item_result_key: new_comment,
}
def create_or_update_comment_reply(self, request, comment, reply,
comments_m2m, default_attrs={},
*args, **kwargs):
"""Create a reply to a comment based on the requested data.
If there's an existing reply to a comment, that one will be updated
instead.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
comment (reviewboard.reviews.models.base_commet.BaseComment):
The comment being replied to.
reply (reviewboard.reviews.models.review.Review):
The review reply owning the comment.
comments_m2m (django.db.models.ManyToManyField):
The reply's comments relation, where the new comment will
be added.
default_attrs (dict, optional):
Default attributes to add to the new comment reply, if an
existing one does not exist.
*args (tuple):
Positional arguments from the caller.
**kwargs (dict):
Keyword arguments from the caller.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
q = self._get_queryset(request, *args, **kwargs)
q = q.filter(Q(reply_to=comment) & Q(review=reply))
try:
new_comment = q.get()
# This already exists. Go ahead and update, but we're going to
# redirect the user to the right place.
is_new = False
except self.model.DoesNotExist:
new_comment = self.model(reply_to=comment, **default_attrs)
is_new = True
rsp = self.update_comment(request=request,
review=reply,
comment=new_comment,
is_reply=True,
**kwargs)
if isinstance(rsp, WebAPIError):
return rsp
data = rsp[1]
if is_new:
comments_m2m.add(new_comment)
reply.save()
return 201, data
else:
return 303, data, {
'Location': self.get_href(new_comment, request, *args,
**kwargs)
}
def update_comment(self, request, review, comment, update_fields=(),
extra_fields={}, is_reply=False, **kwargs):
"""Update an existing comment based on the requested data.
This will modify a comment, setting new fields requested by the caller.
Args:
request (django.http.HttpRequest):
The HTTP request from the client.
review (reviewboard.reviews.models.review.Review):
The review owning the comment.
comment (reviewboard.reviews.models.base_comment.BaseComment):
The comment to update.
update_fields (list of unicode, optional):
The model fields that can be updated through the API.
extra_fields (dict, optional):
Extra fields from the request not otherwise handled by the
API resource. Any ``extra_data`` modifications from this will
be applied to the comment.
is_reply (bool, optional):
Whether this is a reply to another comment.
**kwargs (dict):
Keyword arguments representing additional fields handled by
the API resource. Any that are also listed in ``fields`` will
be set on the model.
Returns:
tuple or djblets.webapi.errors.WebAPIError:
Either a successful payload containing the comment, or an error
payload.
"""
if is_reply:
if not resources.review_reply.has_modify_permissions(request,
review):
return self.get_no_access_error(request)
else:
# Determine whether or not we're updating the issue status.
if self.should_update_issue_status(comment, **kwargs):
return self.update_issue_status(request, self, **kwargs)
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
# If we've updated the comment from having no issue opened,
# to having an issue opened, we need to set the issue status
# to OPEN.
if not comment.issue_opened and kwargs.get('issue_opened', False):
comment.issue_status = BaseComment.OPEN
# If we've updated the comment from having an issue opened to
# having no issue opened, set the issue status back to null.
if comment.issue_opened and not kwargs.get('issue_opened', True):
comment.issue_status = None
for field in ('issue_opened',) + update_fields:
value = kwargs.get(field, None)
if value is not None:
if isinstance(value, six.string_types):
value = value.strip()
setattr(comment, field, value)
self.set_text_fields(comment, 'text', **kwargs)
if not is_reply:
try:
self.import_extra_data(comment, comment.extra_data,
extra_fields)
except ImportExtraDataError as e:
return e.error_payload
comment.save()
return 200, {
self.item_result_key: comment,
}
def update_issue_status(self, request, comment_resource, *args, | |
"1959:147"): "metadataonly",
("prop", "1959:148"): "metadataonly",
("prop", "1959:149"): "metadataonly",
("prop", "1959:15"): "metadataonly",
("prop", "1959:151"): "metadataonly",
("prop", "1959:152"): "metadataonly",
("prop", "1959:154"): "metadataonly",
("prop", "1959:156"): "metadataonly",
("prop", "1959:159"): "metadataonly",
("prop", "1959:16"): "metadataonly",
("prop", "1959:17"): "metadataonly",
("prop", "1959:18"): "metadataonly",
("prop", "1959:22"): "metadataonly",
("prop", "1959:23"): "metadataonly",
("prop", "1959:24"): "metadataonly",
("prop", "1959:25"): "metadataonly",
("prop", "1959:26"): "metadataonly",
("prop", "1959:27"): "metadataonly",
("prop", "1959:28"): "metadataonly",
("prop", "1959:29"): "metadataonly",
("prop", "1959:32"): "metadataonly",
("prop", "1959:33"): "metadataonly",
("prop", "1959:34"): "metadataonly",
("prop", "1959:36"): "metadataonly",
("prop", "1959:37"): "metadataonly",
("prop", "1959:39"): "metadataonly",
("prop", "1959:40"): "metadataonly",
("prop", "1959:41"): "metadataonly",
("prop", "1959:43"): "metadataonly",
("prop", "1959:46"): "metadataonly",
("prop", "1959:47"): "metadataonly",
("prop", "1959:48"): "metadataonly",
("prop", "1959:49"): "metadataonly",
("prop", "1959:5"): "metadataonly",
("prop", "1959:50"): "metadataonly",
("prop", "1959:51"): "metadataonly",
("prop", "1959:53"): "metadataonly",
("prop", "1959:54"): "metadataonly",
("prop", "1959:55"): "metadataonly",
("prop", "1959:56"): "metadataonly",
("prop", "1959:58"): "metadataonly",
("prop", "1959:59"): "metadataonly",
("prop", "1959:60"): "metadataonly",
("prop", "1959:61"): "metadataonly",
("prop", "1959:62"): "metadataonly",
("prop", "1959:64"): "metadataonly",
("prop", "1959:65"): "metadataonly",
("prop", "1959:66"): "metadataonly",
("prop", "1959:68"): "metadataonly",
("prop", "1959:7"): "metadataonly",
("prop", "1959:71"): "metadataonly",
("prop", "1959:73"): "metadataonly",
("prop", "1959:74"): "metadataonly",
("prop", "1959:75"): "metadataonly",
("prop", "1959:76"): "metadataonly",
("prop", "1959:77"): "metadataonly",
("prop", "1959:79"): "metadataonly",
("prop", "1959:8"): "metadataonly",
("prop", "1959:80"): "metadataonly",
("prop", "1959:81"): "metadataonly",
("prop", "1959:82"): "metadataonly",
("prop", "1959:83"): "metadataonly",
("prop", "1959:84"): "metadataonly",
("prop", "1959:85"): "metadataonly",
("prop", "1959:86"): "metadataonly",
("prop", "1959:87"): "metadataonly",
("prop", "1959:88"): "metadataonly",
("prop", "1959:89"): "metadataonly",
("prop", "1959:90"): "metadataonly",
("prop", "1959:91"): "metadataonly",
("prop", "1959:92"): "metadataonly",
("prop", "1959:93"): "metadataonly",
("prop", "1959:94"): "metadataonly",
("prop", "1959:95"): "metadataonly",
("prop", "1959:96"): "metadataonly",
("prop", "1959:97"): "metadataonly",
("prop", "1959:98"): "metadataonly",
("prop", "1959:99"): "metadataonly",
("prop", "1960:105"): "metadataonly",
("prop", "1960:107"): "metadataonly",
("prop", "1960:108"): "metadataonly",
("prop", "1960:11"): "metadataonly",
("prop", "1960:110"): "metadataonly",
("prop", "1960:111"): "metadataonly",
("prop", "1960:113"): "metadataonly",
("prop", "1960:114"): "metadataonly",
("prop", "1960:117"): "metadataonly",
("prop", "1960:118"): "metadataonly",
("prop", "1960:12"): "metadataonly",
("prop", "1960:121"): "metadataonly",
("prop", "1960:122"): "metadataonly",
("prop", "1960:124"): "metadataonly",
("prop", "1960:125"): "metadataonly",
("prop", "1960:129"): "metadataonly",
("prop", "1960:13"): "metadataonly",
("prop", "1960:130"): "metadataonly",
("prop", "1960:131"): "metadataonly",
("prop", "1960:132"): "metadataonly",
("prop", "1960:133"): "metadataonly",
("prop", "1960:136"): "metadataonly",
("prop", "1960:137"): "metadataonly",
("prop", "1960:138"): "metadataonly",
("prop", "1960:14"): "metadataonly",
("prop", "1960:142"): "metadataonly",
("prop", "1960:144"): "metadataonly",
("prop", "1960:145"): "metadataonly",
("prop", "1960:146"): "metadataonly",
("prop", "1960:147"): "metadataonly",
("prop", "1960:148"): "metadataonly",
("prop", "1960:149"): "metadataonly",
("prop", "1960:152"): "metadataonly",
("prop", "1960:153"): "metadataonly",
("prop", "1960:154"): "metadataonly",
("prop", "1960:155"): "metadataonly",
("prop", "1960:156"): "metadataonly",
("prop", "1960:157"): "metadataonly",
("prop", "1960:158"): "metadataonly",
("prop", "1960:16"): "metadataonly",
("prop", "1960:164"): "metadataonly",
("prop", "1960:166"): "metadataonly",
("prop", "1960:18"): "metadataonly",
("prop", "1960:19"): "metadataonly",
("prop", "1960:21"): "metadataonly",
("prop", "1960:23"): "metadataonly",
("prop", "1960:24"): "metadataonly",
("prop", "1960:26"): "metadataonly",
("prop", "1960:27"): "metadataonly",
("prop", "1960:31"): "metadataonly",
("prop", "1960:32"): "metadataonly",
("prop", "1960:35"): "metadataonly",
("prop", "1960:36"): "metadataonly",
("prop", "1960:37"): "metadataonly",
("prop", "1960:4"): "metadataonly",
("prop", "1960:41"): "metadataonly",
("prop", "1960:44"): "metadataonly",
("prop", "1960:45"): "metadataonly",
("prop", "1960:46"): "metadataonly",
("prop", "1960:47"): "metadataonly",
("prop", "1960:48"): "metadataonly",
("prop", "1960:49"): "metadataonly",
("prop", "1960:51"): "metadataonly",
("prop", "1960:54"): "metadataonly",
("prop", "1960:55"): "metadataonly",
("prop", "1960:57"): "metadataonly",
("prop", "1960:58"): "metadataonly",
("prop", "1960:59"): "metadataonly",
("prop", "1960:6"): "metadataonly",
("prop", "1960:60"): "metadataonly",
("prop", "1960:62"): "metadataonly",
("prop", "1960:65"): "metadataonly",
("prop", "1960:66"): "metadataonly",
("prop", "1960:69"): "metadataonly",
("prop", "1960:7"): "metadataonly",
("prop", "1960:70"): "metadataonly",
("prop", "1960:71"): "metadataonly",
("prop", "1960:72"): "metadataonly",
("prop", "1960:73"): "metadataonly",
("prop", "1960:8"): "metadataonly",
("prop", "1960:80"): "metadataonly",
("prop", "1960:82"): "metadataonly",
("prop", "1960:83"): "metadataonly",
("prop", "1960:84"): "metadataonly",
("prop", "1960:86"): "metadataonly",
("prop", "1960:87"): "metadataonly",
("prop", "1960:90"): "metadataonly",
("prop", "1960:91"): "metadataonly",
("prop", "1960:92"): "metadataonly",
("prop", "1960:93"): "metadataonly",
("prop", "1960:94"): "metadataonly",
("prop", "1960:95"): "metadataonly",
("prop", "1960:96"): "metadataonly",
("prop", "1960:97"): "metadataonly",
("prop", "1960:99"): "metadataonly",
("prop", "1961:10"): "metadataonly",
("prop", "1961:102"): "metadataonly",
("prop", "1961:103"): "metadataonly",
("prop", "1961:104"): "metadataonly",
("prop", "1961:105"): "metadataonly",
("prop", "1961:106"): "metadataonly",
("prop", "1961:107"): "metadataonly",
("prop", "1961:110"): "metadataonly",
("prop", "1961:111"): "metadataonly",
("prop", "1961:112"): "metadataonly",
("prop", "1961:113"): "metadataonly",
("prop", "1961:114"): "metadataonly",
("prop", "1961:117"): "metadataonly",
("prop", "1961:118"): "metadataonly",
("prop", "1961:120"): "metadataonly",
("prop", "1961:121"): "metadataonly",
("prop", "1961:123"): "metadataonly",
("prop", "1961:124"): "metadataonly",
("prop", "1961:128"): "metadataonly",
("prop", "1961:129"): "metadataonly",
("prop", "1961:130"): "metadataonly",
("prop", "1961:131"): "metadataonly",
("prop", "1961:132"): "metadataonly",
("prop", "1961:133"): "metadataonly",
("prop", "1961:134"): "metadataonly",
("prop", "1961:136"): "metadataonly",
("prop", "1961:137"): "metadataonly",
("prop", "1961:138"): "metadataonly",
("prop", "1961:139"): "metadataonly",
("prop", "1961:14"): "metadataonly",
("prop", "1961:141"): "metadataonly",
("prop", "1961:142"): "metadataonly",
("prop", "1961:144"): "metadataonly",
("prop", "1961:145"): "metadataonly",
("prop", "1961:146"): "metadataonly",
("prop", "1961:147"): "metadataonly",
("prop", "1961:148"): "metadataonly",
("prop", "1961:149"): "metadataonly",
("prop", "1961:15"): "metadataonly",
("prop", "1961:150"): "metadataonly",
("prop", "1961:152"): "metadataonly",
("prop", "1961:153"): "metadataonly",
("prop", "1961:155"): "metadataonly",
("prop", "1961:16"): "metadataonly",
("prop", "1961:162"): "metadataonly",
("prop", "1961:164"): "metadataonly",
("prop", "1961:165"): "metadataonly",
("prop", "1961:166"): "metadataonly",
("prop", "1961:167"): "metadataonly",
("prop", "1961:169"): "metadataonly",
("prop", "1961:17"): "metadataonly",
("prop", "1961:19"): "metadataonly",
("prop", "1961:2"): "metadataonly",
("prop", "1961:20"): "metadataonly",
("prop", "1961:22"): "metadataonly",
("prop", "1961:23"): "metadataonly",
("prop", "1961:24"): "metadataonly",
("prop", "1961:25"): "metadataonly",
("prop", "1961:26"): "metadataonly",
("prop", "1961:29"): "metadataonly",
("prop", "1961:3"): "metadataonly",
("prop", "1961:32"): "metadataonly",
("prop", "1961:33"): "metadataonly",
("prop", "1961:35"): "metadataonly",
("prop", "1961:36"): "metadataonly",
("prop", "1961:39"): "metadataonly",
("prop", "1961:40"): "metadataonly",
("prop", "1961:42"): "metadataonly",
("prop", "1961:44"): "metadataonly",
("prop", "1961:49"): "metadataonly",
("prop", "1961:50"): "metadataonly",
("prop", "1961:51"): "metadataonly",
("prop", "1961:53"): "metadataonly",
("prop", "1961:55"): "metadataonly",
("prop", "1961:60"): "metadataonly",
("prop", "1961:61"): "metadataonly",
("prop", "1961:63"): "metadataonly",
("prop", "1961:64"): "metadataonly",
("prop", "1961:65"): "metadataonly",
("prop", "1961:66"): "metadataonly",
("prop", "1961:67"): "metadataonly",
("prop", "1961:68"): "metadataonly",
("prop", "1961:72"): "metadataonly",
("prop", "1961:73"): "metadataonly",
("prop", "1961:74"): "metadataonly",
("prop", "1961:75"): "metadataonly",
("prop", "1961:76"): "metadataonly",
("prop", "1961:78"): "metadataonly",
("prop", "1961:81"): "metadataonly",
("prop", "1961:83"): "metadataonly",
("prop", "1961:87"): "metadataonly",
("prop", "1961:88"): "metadataonly",
("prop", "1961:89"): "metadataonly",
("prop", "1961:9"): "metadataonly",
("prop", "1961:90"): "metadataonly",
("prop", "1961:91"): "metadataonly",
("prop", "1961:95"): "metadataonly",
("prop", "1961:96"): "metadataonly",
("prop", "1961:97"): "metadataonly",
("prop", "1961:98"): "metadataonly",
("prop", "1961:99"): "metadataonly",
("prop", "1962:106"): "metadataonly",
("prop", "1962:109"): "metadataonly",
("prop", "1962:110"): "metadataonly",
("prop", "1962:111"): "metadataonly",
("prop", "1962:112"): "metadataonly",
("prop", "1962:113"): "metadataonly",
("prop", "1962:115"): "metadataonly",
("prop", "1962:116"): "metadataonly",
("prop", "1962:117"): "metadataonly",
("prop", "1962:118"): "metadataonly",
("prop", "1962:119"): "metadataonly",
("prop", "1962:121"): "metadataonly",
("prop", "1962:123"): "metadataonly",
("prop", "1962:127"): "metadataonly",
("prop", "1962:128"): "metadataonly",
("prop", "1962:129"): "metadataonly",
("prop", "1962:13"): "metadataonly",
("prop", "1962:130"): "metadataonly",
("prop", "1962:132"): "metadataonly",
("prop", "1962:133"): "metadataonly",
("prop", "1962:134"): "metadataonly",
("prop", "1962:135"): "metadataonly",
("prop", "1962:137"): "metadataonly",
("prop", "1962:138"): "metadataonly",
("prop", "1962:139"): "metadataonly",
("prop", "1962:14"): "metadataonly",
("prop", "1962:141"): "metadataonly",
("prop", "1962:142"): "metadataonly",
("prop", "1962:144"): "metadataonly",
("prop", "1962:145"): "metadataonly",
("prop", "1962:146"): "metadataonly",
("prop", "1962:149"): "metadataonly",
("prop", "1962:15"): "metadataonly",
("prop", "1962:150"): "metadataonly",
("prop", "1962:152"): "metadataonly",
("prop", "1962:153"): "metadataonly",
("prop", "1962:156"): "metadataonly",
("prop", "1962:157"): "metadataonly",
("prop", "1962:158"): "metadataonly",
("prop", "1962:160"): "metadataonly",
("prop", "1962:161"): "metadataonly",
("prop", "1962:162"): "metadataonly",
("prop", "1962:164"): "metadataonly",
("prop", "1962:169"): "metadataonly",
("prop", "1962:17"): "metadataonly",
("prop", "1962:170"): "metadataonly",
("prop", "1962:171"): "metadataonly",
("prop", "1962:172"): "metadataonly",
("prop", "1962:173"): "metadataonly",
("prop", "1962:174"): "metadataonly",
("prop", "1962:18"): "metadataonly",
("prop", "1962:2"): "metadataonly",
("prop", "1962:20"): "metadataonly",
("prop", "1962:21"): "metadataonly",
("prop", "1962:22"): "metadataonly",
("prop", "1962:23"): "metadataonly",
("prop", "1962:25"): "metadataonly",
("prop", "1962:26"): "metadataonly",
("prop", "1962:27"): "metadataonly",
("prop", "1962:28"): "metadataonly",
("prop", "1962:29"): "metadataonly",
("prop", "1962:3"): "metadataonly",
("prop", "1962:31"): "metadataonly",
("prop", "1962:32"): "metadataonly",
("prop", "1962:33"): "metadataonly",
("prop", "1962:34"): "metadataonly",
("prop", "1962:35"): "metadataonly",
("prop", "1962:36"): "metadataonly",
("prop", "1962:37"): "metadataonly",
("prop", "1962:38"): "metadataonly",
("prop", "1962:39"): "metadataonly",
("prop", "1962:41"): "metadataonly",
("prop", "1962:42"): "metadataonly",
("prop", "1962:43"): "metadataonly",
("prop", "1962:47"): "metadataonly",
("prop", "1962:48"): "metadataonly",
("prop", "1962:49"): "metadataonly",
("prop", "1962:5"): "default", # BrB
("prop", "1962:55"): "metadataonly",
("prop", "1962:58"): "metadataonly",
("prop", "1962:60"): "metadataonly",
("prop", "1962:61"): "metadataonly",
("prop", "1962:63"): "metadataonly",
("prop", "1962:64"): "metadataonly",
("prop", "1962:65"): "metadataonly",
("prop", "1962:66"): "metadataonly",
("prop", "1962:67"): "metadataonly",
("prop", "1962:69"): "metadataonly",
("prop", "1962:72"): "metadataonly",
("prop", "1962:73"): "metadataonly",
("prop", "1962:74"): "metadataonly",
("prop", "1962:75"): "metadataonly",
("prop", "1962:76"): "metadataonly",
("prop", "1962:79"): "metadataonly",
("prop", "1962:80"): "metadataonly",
("prop", "1962:81"): "metadataonly",
("prop", "1962:83"): "metadataonly",
("prop", "1962:84"): "metadataonly",
("prop", "1962:86"): "metadataonly",
("prop", "1962:87"): "metadataonly",
("prop", "1962:88"): "metadataonly",
("prop", "1962:89"): "metadataonly",
("prop", "1962:91"): "metadataonly",
("prop", "1962:93"): "metadataonly",
("prop", "1962:95"): "metadataonly",
("prop", "1962:96"): "metadataonly",
("prop", "1962:97"): "metadataonly",
("prop", "1962:99"): "metadataonly",
("prop", "1963:10"): "metadataonly",
("prop", "1963:102"): "metadataonly",
("prop", "1963:104"): "metadataonly",
("prop", "1963:105"): "metadataonly",
("prop", "1963:11"): "metadataonly",
("prop", "1963:110"): "metadataonly",
("prop", "1963:111"): "metadataonly",
("prop", "1963:112"): "metadataonly",
("prop", "1963:115"): "metadataonly",
("prop", "1963:116"): "metadataonly",
("prop", "1963:119"): "metadataonly",
("prop", "1963:12"): "metadataonly",
("prop", "1963:121"): "metadataonly",
("prop", "1963:122"): "metadataonly",
("prop", "1963:123"): "metadataonly",
("prop", "1963:129"): "metadataonly",
("prop", "1963:130"): "metadataonly",
("prop", "1963:131"): "metadataonly",
("prop", "1963:132"): "metadataonly",
("prop", "1963:133"): "metadataonly",
("prop", "1963:134"): "metadataonly",
("prop", "1963:135"): "metadataonly",
("prop", "1963:137"): "metadataonly",
("prop", "1963:138"): "metadataonly",
("prop", "1963:140"): "metadataonly",
("prop", "1963:143"): "metadataonly",
("prop", "1963:146"): "metadataonly",
("prop", "1963:148"): "metadataonly",
("prop", "1963:15"): "metadataonly",
("prop", "1963:150"): "metadataonly",
("prop", "1963:151"): "metadataonly",
("prop", "1963:152"): "metadataonly",
("prop", "1963:153"): "metadataonly",
("prop", "1963:154"): "metadataonly",
("prop", "1963:155"): "metadataonly",
("prop", "1963:157"): "metadataonly",
("prop", "1963:158"): "metadataonly",
("prop", "1963:160"): "metadataonly",
("prop", "1963:163"): "metadataonly",
("prop", "1963:164"): "metadataonly",
("prop", "1963:165"): "metadataonly",
("prop", "1963:166"): "metadataonly",
("prop", "1963:169"): "metadataonly",
("prop", "1963:19"): "metadataonly",
("prop", | |
tell if {ndim}-d cells with {nbounds} vertices "
"are contiguous"
)
# Check cells (j, i) and cells (j, i+1) are contiguous
diff = bounds[:, :-1, 1] - bounds[:, 1:, 0]
if period is not None:
diff = diff % period
if diff.any():
return False
diff = bounds[:, :-1, 2] - bounds[:, 1:, 3]
if period is not None:
diff = diff % period
if diff.any():
return False
# Check cells (j, i) and (j+1, i) are contiguous
diff = bounds[:-1, :, 3] - bounds[1:, :, 0]
if period is not None:
diff = diff % period
if diff.any():
return False
diff = bounds[:-1, :, 2] - bounds[1:, :, 1]
if period is not None:
diff = diff % period
if diff.any():
return False
return True
if ndim > 2:
raise ValueError(f"Can't tell if {ndim}-d cells are contiguous")
if nbounds != 2:
raise ValueError(
f"Can't tell if {ndim}-d cells with {nbounds} vertices "
"are contiguous"
)
lower = bounds[1:, 0]
upper = bounds[:-1, 1]
if not overlap:
diff = lower - upper
if period is not None:
diff = diff % period
return not diff.any()
else:
direction = self.direction()
if direction is None:
return (lower <= upper).all() or (lower >= upper).all()
if direction:
return (lower <= upper).all()
else:
return (lower >= upper).all()
@_deprecated_kwarg_check("i")
@_inplace_enabled(default=False)
def convert_reference_time(
    self,
    units=None,
    calendar_months=False,
    calendar_years=False,
    inplace=False,
    i=False,
):
    """Convert reference time data values to have new units.

    Conversion is done by decoding the reference times to
    date-time objects and then re-encoding them for the new units.

    Any conversions are possible, but this method is primarily for
    conversions which require a change in the date-times originally
    encoded. For example, use this method to reinterpret data values
    in units of "months" since a reference time to data values in
    "calendar months" since a reference time. This is often necessary
    when units of "calendar months" were intended but encoded as
    "months", which have a special definition. See the note and
    examples below for more details.

    For conversions which do not require a change in the date-times
    implied by the data values, this method will be considerably
    slower than a simple reassignment of the units. For example, if
    the original units are ``'days since 2000-12-1'`` then ``c.Units
    = cf.Units('days since 1901-1-1')`` will give the same result and
    be considerably faster than
    ``c.convert_reference_time(cf.Units('days since 1901-1-1'))``.

    .. note:: It is recommended that the units "year" and "month"
              be used with caution, as explained in the following
              excerpt from the CF conventions: "The Udunits
              package defines a year to be exactly 365.242198781
              days (the interval between 2 successive passages of
              the sun through vernal equinox). It is not a
              calendar year. Udunits includes the following
              definitions for years: a common_year is 365 days, a
              leap_year is 366 days, a Julian_year is 365.25 days,
              and a Gregorian_year is 365.2425 days. For similar
              reasons the unit ``month``, which is defined to be
              exactly year/12, should also be used with caution."

    :Parameters:

        units: `Units`, optional
            The reference time units to convert to. By default the
            units are days since the original reference time in the
            original calendar.

            *Parameter example:*
              If the original units are ``'months since 2000-1-1'``
              in the Gregorian calendar then the default units to
              convert to are ``'days since 2000-1-1'`` in the
              Gregorian calendar.

        calendar_months: `bool`, optional
            If True then treat units of ``'months'`` as if they
            were calendar months (in whichever calendar is
            originally specified), rather than a 12th of the
            interval between 2 successive passages of the sun
            through vernal equinox (i.e. 365.242198781/12 days).

        calendar_years: `bool`, optional
            If True then treat units of ``'years'`` as if they
            were calendar years (in whichever calendar is
            originally specified), rather than the interval
            between 2 successive passages of the sun through
            vernal equinox (i.e. 365.242198781 days).

        {{inplace: `bool`, optional}}

        {{i: deprecated at version 3.0.0}}

    :Returns:

        `{{class}}` or `None`
            The construct with converted reference time data
            values, or `None` if the operation was in-place.

    **Examples:**

    >>> print(f.array)
    [1 2 3 4]
    >>> f.Units
    <Units: months since 2000-1-1>
    >>> print(f.datetime_array)
    [datetime.datetime(2000, 1, 31, 10, 29, 3, 831197)
     datetime.datetime(2000, 3, 1, 20, 58, 7, 662441)
     datetime.datetime(2000, 4, 1, 7, 27, 11, 493645)
     datetime.datetime(2000, 5, 1, 17, 56, 15, 324889)]
    >>> f.convert_reference_time(calendar_months=True, inplace=True)
    >>> print(f.datetime_array)
    [datetime.datetime(2000, 2, 1, 0, 0)
     datetime.datetime(2000, 3, 1, 0, 0)
     datetime.datetime(2000, 4, 1, 0, 0)
     datetime.datetime(2000, 5, 1, 0, 0)]
    >>> print(f.array)
    [ 31.  60.  91. 121.]
    >>> f.Units
    <Units: days since 2000-1-1>

    """
    # Delegate to the superclass data operation, which applies the
    # conversion to the data and (where present) to any bounds.
    return self._apply_superclass_data_oper(
        _inplace_enabled_define_and_cleanup(self),
        "convert_reference_time",
        inplace=inplace,
        i=i,
        units=units,
        calendar_months=calendar_months,
        calendar_years=calendar_years,
    )
def get_property(self, prop, default=ValueError(), bounds=False):
    """Get a CF property.

    .. versionadded:: 3.2.0

    .. seealso:: `clear_properties`, `del_property`, `has_property`,
                 `properties`, `set_property`

    :Parameters:

        prop: `str`
            The name of the CF property.

            *Parameter example:*
              ``prop='long_name'``

        default: optional
            Return the value of the *default* parameter if the
            property has not been set.

            {{default Exception}}

        bounds: `bool`
            If True then, when the property has not been set on the
            parent construct, also look for it on the bounds
            component (if there is one).

    :Returns:

        The value of the named property or the default value, if
        set.

    **Examples:**

    >>> f = cf.{{class}}()
    >>> f.set_property('project', 'CMIP7')
    >>> f.has_property('project')
    True
    >>> f.get_property('project')
    'CMIP7'
    >>> f.del_property('project')
    'CMIP7'
    >>> f.has_property('project')
    False
    >>> print(f.del_property('project', None))
    None
    >>> print(f.get_property('project', None))
    None

    """
    # Prefer the parent construct's property; fall back to the bounds
    # component only when requested and the parent has no value.
    value = super().get_property(prop, None)
    if value is None and bounds and self.has_bounds():
        value = self.get_bounds().get_property(prop, None)

    if value is not None:
        return value

    # Nothing found anywhere: let the superclass apply the *default*
    # semantics (return it, or raise if it is an exception instance).
    return super().get_property(prop, default)
@_inplace_enabled(default=False)
def flatten(self, axes=None, inplace=False):
    """Flatten axes of the data.

    Any subset of the axes may be flattened.

    The shape of the data may change, but the size will not.

    The flattening is executed in row-major (C-style) order. For
    example, the array ``[[1, 2], [3, 4]]`` would be flattened
    across both dimensions to ``[1 2 3 4]``.

    .. versionadded:: 3.0.2

    .. seealso:: `insert_dimension`, `flip`, `swapaxes`, `transpose`

    :Parameters:

        axes: (sequence of) int or str, optional
            Select the axes. By default all axes are
            flattened. The *axes* argument may be one, or a
            sequence, of:

            * An internal axis identifier. Selects this axis.

            * An integer. Selects the axis corresponding to the
              given position in the list of axes of the data
              array.

            No axes are flattened if *axes* is an empty sequence.

        {{inplace: `bool`, optional}}

    :Returns:

        `{{class}}` or `None`
            The construct with flattened data, or `None` if the
            operation was in-place.

    **Examples**

    >>> f.shape
    (1, 2, 3, 4)
    >>> f.flatten().shape
    (24,)
    >>> f.flatten([1, 3]).shape
    (1, 8, 3)
    >>> f.flatten([0, -1], inplace=True)
    >>> f.shape
    (4, 2, 3)

    """
    # The 'axes' argument is re-parsed before being applied to each
    # component, so this method can't be consolidated through
    # _apply_superclass_data_operations despite the similar logic.
    v = _inplace_enabled_define_and_cleanup(self)
    super(PropertiesDataBounds, v).flatten(axes, inplace=True)

    if (b := v.get_bounds(None)) is not None:
        axes = self._parse_axes(axes)
        b.flatten(axes, inplace=True)

    if (ring := v.get_interior_ring(None)) is not None:
        axes = self._parse_axes(axes)
        ring.flatten(axes, inplace=True)

    return v
@_deprecated_kwarg_check("i")
@_inplace_enabled(default=False)
def floor(self, bounds=True, inplace=False, i=False):
    """Floor the data array, element-wise.

    The floor of ``x`` is the largest integer ``n``, such that
    ``n <= x``.

    .. versionadded:: 1.0

    .. seealso:: `ceil`, `rint`, `trunc`

    :Parameters:

        bounds: `bool`, optional
            If False then do not alter any bounds. By default any
            bounds are also altered.

        {{inplace: `bool`, optional}}

        {{i: deprecated at version 3.0.0}}

    :Returns:

        The construct with floored data. If the operation was
        in-place then `None` is returned.

    **Examples:**

    >>> print(f.array)
    [-1.9 -1.5 -1.1 -1.   0.   1.   1.1  1.5  1.9]
    >>> print(f.floor().array)
    [-2. -2. -2. -1.  0.  1.  1.  1.  1.]
    >>> f.floor(inplace=True)
    >>> print(f.array)
    [-2. -2. -2. -1.  0.  1.  1.  1.  1.]

    """
    # Hand the element-wise operation off to the superclass helper,
    # which also floors the bounds unless bounds=False.
    target = _inplace_enabled_define_and_cleanup(self)
    return self._apply_superclass_data_oper(
        target,
        "floor",
        bounds=bounds,
        inplace=inplace,
        i=i,
    )
def direction(self):
    """Return `None`, indicating that it is not specified whether
    the values are increasing or decreasing.

    .. versionadded:: 2.0

    :Returns:

        `None`

    **Examples:**

    >>> print(c.direction())
    None

    """
    # The monotonic direction is deliberately unspecified for this
    # class, so always report "unknown".
    return None
def match_by_property(self, *mode, **properties):
"""Determine whether or not a variable satisfies conditions.
Conditions may be specified on the variable's attributes and CF
properties.
:Parameters:
:Returns:
`bool`
Whether or not the variable matches the given |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.